code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : int = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : int=None , ):
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__ : Tuple =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[int] =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase__ : int =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ : Dict =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ : int =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=13, lowerCamelCase : Union[str, Any]=7, lowerCamelCase : Dict=True, lowerCamelCase : str=False, lowerCamelCase : Any=99, lowerCamelCase : str=16, lowerCamelCase : Optional[int]=2, lowerCamelCase : Tuple=4, lowerCamelCase : Dict=4, lowerCamelCase : Any="gelu", lowerCamelCase : Dict=0.1, lowerCamelCase : Tuple=0.1, lowerCamelCase : Optional[Any]=32, lowerCamelCase : str=2, lowerCamelCase : Tuple=1, lowerCamelCase : List[str]=0, lowerCamelCase : Any=0.02, )-> Dict:
lowerCamelCase__ : Dict =parent
lowerCamelCase__ : int =batch_size
lowerCamelCase__ : Tuple =seq_length
lowerCamelCase__ : Optional[Any] =is_training
lowerCamelCase__ : str =use_labels
lowerCamelCase__ : Optional[int] =vocab_size
lowerCamelCase__ : Dict =hidden_size
lowerCamelCase__ : str =num_hidden_layers
lowerCamelCase__ : List[str] =num_attention_heads
lowerCamelCase__ : int =intermediate_size
lowerCamelCase__ : Union[str, Any] =hidden_act
lowerCamelCase__ : List[Any] =hidden_dropout_prob
lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
lowerCamelCase__ : Tuple =max_position_embeddings
lowerCamelCase__ : Optional[int] =eos_token_id
lowerCamelCase__ : Tuple =pad_token_id
lowerCamelCase__ : Union[str, Any] =bos_token_id
lowerCamelCase__ : int =initializer_range
def snake_case ( self : Optional[int] )-> str:
lowerCamelCase__ : Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
lowerCamelCase__ : str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
lowerCamelCase__ : List[str] =shift_tokens_right(lowerCamelCase, 1, 2 )
lowerCamelCase__ : List[Any] =BlenderbotSmallConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =prepare_blenderbot_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
return config, inputs_dict
def snake_case ( self : Optional[Any] )-> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Any =self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self : Optional[Any], lowerCamelCase : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any] )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =20
lowerCamelCase__ : str =model_class_name(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase__ , lowerCamelCase__ : Any =(
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase__ : Optional[int] =model.init_cache(decoder_input_ids.shape[0], lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
lowerCamelCase__ : Any =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
lowerCamelCase__ : List[Any] =model.decode(
decoder_input_ids[:, :-1], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : Any =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
lowerCamelCase__ : str =model.decode(
decoder_input_ids[:, -1:], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : List[str] =model.decode(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
def snake_case ( self : List[Any], lowerCamelCase : str, lowerCamelCase : List[Any], lowerCamelCase : int )-> Optional[Any]:
lowerCamelCase__ : Tuple =20
lowerCamelCase__ : Optional[int] =model_class_name(lowerCamelCase )
lowerCamelCase__ : List[Any] =model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase__ , lowerCamelCase__ : Tuple =(
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase__ : Tuple =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
lowerCamelCase__ : Tuple =model.init_cache(decoder_input_ids.shape[0], lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
lowerCamelCase__ : Union[str, Any] =model.decode(
decoder_input_ids[:, :-1], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : Any =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
lowerCamelCase__ : Tuple =model.decode(
decoder_input_ids[:, -1:], lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : List[Any] =model.decode(lowerCamelCase, lowerCamelCase, decoder_attention_mask=lowerCamelCase )
lowerCamelCase__ : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
_a = 9_9
def snake_case ( self : Optional[Any] )-> str:
lowerCamelCase__ : str =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
], dtype=np.intaa, )
lowerCamelCase__ : List[str] =input_ids.shape[0]
lowerCamelCase__ : Dict =BlenderbotSmallConfig(
vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def snake_case ( self : Optional[Any] )-> Any:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self._get_config_and_data()
lowerCamelCase__ : Optional[int] =FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase )
lowerCamelCase__ : List[str] =lm_model(input_ids=lowerCamelCase )
lowerCamelCase__ : Tuple =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, lowerCamelCase )
def snake_case ( self : List[Any] )-> Any:
lowerCamelCase__ : str =BlenderbotSmallConfig(
vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
lowerCamelCase__ : List[Any] =FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase )
lowerCamelCase__ : Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa )
lowerCamelCase__ : Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa )
lowerCamelCase__ : Tuple =lm_model(input_ids=lowerCamelCase, decoder_input_ids=lowerCamelCase )
lowerCamelCase__ : str =(*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa )
lowerCamelCase__ : Any =shift_tokens_right(lowerCamelCase, 1, 2 )
lowerCamelCase__ : Optional[Any] =np.equal(lowerCamelCase, 1 ).astype(np.floataa ).sum()
lowerCamelCase__ : int =np.equal(lowerCamelCase, 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape, input_ids.shape )
self.assertEqual(lowerCamelCase, n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
_a = True
_a = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_a = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def snake_case ( self : Any )-> Any:
lowerCamelCase__ : Optional[Any] =FlaxBlenderbotSmallModelTester(self )
def snake_case ( self : Union[str, Any] )-> Dict:
lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : Dict =self._prepare_for_class(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase : List[Any], lowerCamelCase : List[str]=None, **lowerCamelCase : Any ):
return model.encode(input_ids=lowerCamelCase, attention_mask=lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase__ : Tuple =encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase__ : Dict =encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : int =model_class(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
lowerCamelCase__ : str ={
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
return model.decode(
decoder_input_ids=lowerCamelCase, decoder_attention_mask=lowerCamelCase, encoder_outputs=lowerCamelCase, )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase__ : str =decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase__ : List[str] =decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def snake_case ( self : Dict )-> Tuple:
for model_class_name in self.all_model_classes:
lowerCamelCase__ : Dict =model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__ : str =np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : str =VideoMAEConfig()
set_architecture_configs(__lowerCamelCase , __lowerCamelCase )
if "finetuned" not in model_name:
lowerCamelCase__ : int =False
if "finetuned" in model_name:
lowerCamelCase__ : str ='''huggingface/label-files'''
if "kinetics" in model_name:
lowerCamelCase__ : List[Any] =400
lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json'''
elif "ssv2" in model_name:
lowerCamelCase__ : Tuple =174
lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Dict =idalabel
lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()}
return config
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
if "small" in model_name:
lowerCamelCase__ : Optional[Any] =384
lowerCamelCase__ : List[Any] =1536
lowerCamelCase__ : int =12
lowerCamelCase__ : Dict =16
lowerCamelCase__ : List[Any] =12
lowerCamelCase__ : Optional[Any] =3
lowerCamelCase__ : Union[str, Any] =192
lowerCamelCase__ : str =768
elif "large" in model_name:
lowerCamelCase__ : Union[str, Any] =1024
lowerCamelCase__ : str =4096
lowerCamelCase__ : int =24
lowerCamelCase__ : Dict =16
lowerCamelCase__ : Union[str, Any] =12
lowerCamelCase__ : List[Any] =8
lowerCamelCase__ : int =512
lowerCamelCase__ : Optional[Any] =2048
elif "huge" in model_name:
lowerCamelCase__ : Optional[int] =1280
lowerCamelCase__ : Optional[int] =5120
lowerCamelCase__ : List[Any] =32
lowerCamelCase__ : List[Any] =16
lowerCamelCase__ : Optional[Any] =12
lowerCamelCase__ : Dict =8
lowerCamelCase__ : List[Any] =640
lowerCamelCase__ : Any =2560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
if "encoder." in name:
lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' )
return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
if key.startswith('''encoder.''' ):
lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
if "qkv" in key:
lowerCamelCase__ : Any =key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
lowerCamelCase__ : Tuple =config.decoder_hidden_size
lowerCamelCase__ : str =int(key_split[2] )
lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
if "weight" in key:
lowerCamelCase__ : List[Any] =val[:dim, :]
lowerCamelCase__ : Any =val[dim : dim * 2, :]
lowerCamelCase__ : Dict =val[-dim:, :]
else:
lowerCamelCase__ : Optional[Any] =config.hidden_size
lowerCamelCase__ : Optional[Any] =int(key_split[1] )
lowerCamelCase__ : str ='''videomae.encoder.layer.'''
if "weight" in key:
lowerCamelCase__ : int =val[:dim, :]
lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
lowerCamelCase__ : List[Any] =val[-dim:, :]
else:
lowerCamelCase__ : int =val
return orig_state_dict
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase )
return list(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
if "finetuned" in model_name:
lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
else:
lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
# download original checkpoint, hosted on Google Drive
lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
if "model" in files:
lowerCamelCase__ : Dict =files['''model''']
else:
lowerCamelCase__ : str =files['''module''']
lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# verify model on basic input
lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowerCamelCase__ : int =prepare_video()
lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
if "finetuned" not in model_name:
lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
lowerCamelCase__ : int =model(**__lowerCamelCase )
lowerCamelCase__ : Dict =outputs.logits
lowerCamelCase__ : List[str] =[
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCamelCase__ : int =torch.Size([1, 174] )
lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCamelCase__ : List[str] =torch.Size([1, 400] )
lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCamelCase__ : str =torch.Size([1, 400] )
lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCamelCase__ : str =torch.Size([1, 174] )
lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCamelCase__ : str =outputs.loss
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_lowercase : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'upernet'
def __init__( self : Optional[Any], lowerCamelCase : Any=None, lowerCamelCase : List[Any]=512, lowerCamelCase : Any=0.02, lowerCamelCase : Any=[1, 2, 3, 6], lowerCamelCase : int=True, lowerCamelCase : Any=0.4, lowerCamelCase : Tuple=384, lowerCamelCase : Tuple=256, lowerCamelCase : int=1, lowerCamelCase : Any=False, lowerCamelCase : List[str]=255, **lowerCamelCase : Optional[Any], )-> Tuple:
super().__init__(**lowerCamelCase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCamelCase__ : Optional[Any] =CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] =backbone_config.get('''model_type''' )
lowerCamelCase__ : Optional[Any] =CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : List[str] =config_class.from_dict(lowerCamelCase )
lowerCamelCase__ : int =backbone_config
lowerCamelCase__ : Any =hidden_size
lowerCamelCase__ : int =initializer_range
lowerCamelCase__ : Tuple =pool_scales
lowerCamelCase__ : Optional[int] =use_auxiliary_head
lowerCamelCase__ : int =auxiliary_loss_weight
lowerCamelCase__ : List[Any] =auxiliary_in_channels
lowerCamelCase__ : Tuple =auxiliary_channels
lowerCamelCase__ : Tuple =auxiliary_num_convs
lowerCamelCase__ : List[Any] =auxiliary_concat_input
lowerCamelCase__ : Dict =loss_ignore_index
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : List[str] =copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Union[str, Any] =self.backbone_config.to_dict()
lowerCamelCase__ : str =self.__class__.model_type
return output
| 625 |
"""simple docstring"""
_lowercase : str = 0 # The first color of the flag.
_lowercase : Dict = 1 # The second color of the flag.
_lowercase : Tuple = 2 # The third color of the flag.
_lowercase : Optional[int] = (red, white, blue)
def snake_case__ ( __lowerCamelCase : list ):
"""simple docstring"""
if not sequence:
return []
if len(__lowerCamelCase ) == 1:
return list(__lowerCamelCase )
lowerCamelCase__ : List[Any] =0
lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1
lowerCamelCase__ : Tuple =0
while mid <= high:
if sequence[mid] == colors[0]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid]
high -= 1
else:
lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip()
_lowercase : int = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[int] ):
"""simple docstring"""
if not numbers:
return 0
if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
lowerCamelCase__ : Any =numbers[0]
for i in range(1 , len(__lowerCamelCase ) ):
# update the maximum and minimum subarray products
lowerCamelCase__ : Dict =numbers[i]
if number < 0:
lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now
lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number )
lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number )
# update the maximum product found till now
lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase )
return max_prod
| 625 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Fast, tiny-model unit tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): a bulk rename collapsed distinct locals onto the single name
    ``lowerCamelCase__``; references such as ``embedder_hidden_size``,
    ``feature_extractor``, ``components``, ``generator``, ``input_image``,
    ``sd_pipe``, ``inputs``, ``image`` and ``image_slice`` below are unbound
    as written, and several boolean/None arguments were replaced by the bare
    name ``lowerCamelCase``. The original bindings must be restored before
    these tests can run; comments describe the intended behavior only.
    '''

    # pipeline class under test and its expected __call__ parameter sets
    _a = StableUnCLIPImgaImgPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _a = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _a = frozenset([] )

    def snake_case ( self : List[str] )-> str:
        # Build the dict of tiny model components for a fast dummy pipeline
        # (originally ``get_dummy_components``).
        lowerCamelCase__ : Dict =32
        lowerCamelCase__ : Optional[Any] =embedder_hidden_size  # NOTE(review): unbound — presumably the 32 above
        # image encoding components
        lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =AutoencoderKL()
        lowerCamelCase__ : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components

    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
        # Build deterministic dummy inputs (originally ``get_dummy_inputs``).
        # NOTE(review): the repeated parameter name ``lowerCamelCase`` is a
        # SyntaxError — the originals were (device, seed=0, pil_image=True).
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        if pil_image:
            # map the [-1, 1] tensor to [0, 1] and convert to a PIL image
            lowerCamelCase__ : int =input_image * 0.5 + 0.5
            lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
            lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def snake_case ( self : List[str] )-> Optional[Any]:
        # End-to-end run of the dummy pipeline on CPU; compares a 3x3 corner
        # slice of the generated image against a fixed reference.
        lowerCamelCase__ : Dict ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
        inputs.update({'''image_embeds''': None} )
        lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def snake_case ( self : int )-> Tuple:
        # Attention slicing must not change the forward-pass results
        # (looser tolerance on CPU/MPS).
        lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )

    def snake_case ( self : int )-> Optional[Any]:
        # Batched inference must match single-sample inference.
        lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def snake_case ( self : List[str] )-> List[str]:
        # xformers attention must reproduce the default attention path.
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow GPU integration tests for the stable-unclip img2img pipeline.

    NOTE(review): the original method names (tearDown / test_*) were lost in a
    bulk rename to ``snake_case``, so unittest will neither discover these as
    tests nor call the teardown hook; locals such as ``pipe``, ``input_image``,
    ``expected_image``, ``image`` and ``mem_bytes`` are likewise unbound as
    written. Comments describe the intended behavior only.
    '''

    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Optional[int] )-> int:
        # l-variant img2img: generated image must match a reference .npy
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> Tuple:
        # h-variant img2img: same flow against the 2-1-h checkpoint
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> List[str]:
        # memory-footprint test: with attention slicing + sequential CPU
        # offload the run must stay under 7 GB of peak CUDA memory
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
    """Project Euler 2: return the sum of the even Fibonacci numbers that do
    not exceed ``__lowerCamelCase`` (default four million).
    """
    limit = __lowerCamelCase
    even_fibs = []
    a, b = 0, 1
    while b <= limit:
        if b % 2 == 0:
            # the original (renamed) code appended the limit here instead of b
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


# preserve the original public name used by the __main__ guard
solution = snake_case__
if __name__ == "__main__":
    # NOTE(review): `solution` was lost in the module-wide rename; call the
    # function defined above directly.
    print(f'{snake_case__() = }')
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
    """Project Euler 2: return the sum of the even Fibonacci numbers that do
    not exceed ``__lowerCamelCase`` (default four million).
    """
    limit = __lowerCamelCase
    even_fibs = []
    a, b = 0, 1
    while b <= limit:
        if b % 2 == 0:
            # the original (renamed) code appended the limit here instead of b
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


# preserve the original public name used by the __main__ guard
solution = snake_case__
if __name__ == "__main__":
    # NOTE(review): `solution` was lost in the module-wide rename; call the
    # function defined above directly.
    print(f'{snake_case__() = }')
| 625 | 1 |
"""simple docstring"""
from math import factorial
def snake_case__ ( __lowerCamelCase : int = 20 ):
    """Return the number of lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n).
    """
    n = 2 * __lowerCamelCase  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


# preserve the original public name used by the __main__ guard
solution = snake_case__
if __name__ == "__main__":
    import sys

    # NOTE(review): `solution` and `n` were lost in the module-wide rename;
    # restored to call the function defined above.
    if len(sys.argv) == 1:
        print(snake_case__(20))
    else:
        try:
            n = int(sys.argv[1])
            print(snake_case__(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 625 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    '''Helper that builds tiny configs/inputs for the TF BlenderbotSmall tests.

    NOTE(review): the bulk rename gave every ``__init__`` parameter the same
    name ``lowerCamelCase`` (a SyntaxError) and collapsed the attribute
    assignments onto ``lowerCamelCase__``, so references such as
    ``self.batch_size`` / ``input_ids`` / ``config`` are unbound as written.
    The original signature was presumably (parent, batch_size=13,
    seq_length=7, is_training=True, use_labels=False, vocab_size=99,
    hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1, max_position_embeddings=20,
    eos_token_id=2, pad_token_id=1, bos_token_id=0) — confirm upstream.
    '''

    _a = BlenderbotSmallConfig  # config class instantiated below
    _a = {}  # extra config overrides
    _a = 'gelu'

    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        # store the dummy-model hyperparameters on the instance
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id

    def snake_case ( self : Any )-> Any:
        # originally ``prepare_config_and_inputs_for_common``: random ids
        # terminated with EOS, a matching config, and the model input dict
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict

    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        # originally ``check_decoder_model_past_large_inputs``: verify cached
        # (past_key_values) decoding matches a full forward pass.
        # NOTE(review): duplicate ``lowerCamelCase`` parameters are a
        # SyntaxError — originals were (config, inputs_dict).
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def snake_case__ ( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    """Assemble the input dict for TF BlenderbotSmall model tests.

    Any mask not supplied is derived: attention masks from the pad token,
    head masks as all-ones of the configured (layers, heads) shape.
    Parameter names restored from the body — the renamed original repeated
    ``__lowerCamelCase`` for every parameter, which is a SyntaxError; the
    names mirror the Flax twin of this helper.
    """
    if attention_mask is None:
        # mask out padding positions in the encoder input
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # never mask the first decoder position (decoder start token);
        # mask padding everywhere else
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# preserve the name used by TFBlenderbotSmallModelTester above
prepare_blenderbot_small_inputs_dict = snake_case__
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Common model tests for TF BlenderbotSmall.

    NOTE(review): ``TFBlenderbotSmallModelTester`` is not defined in this
    module after the bulk rename (the tester class above was renamed), and
    the two locals in the first method should presumably be
    ``self.model_tester`` and ``self.config_tester`` — confirm upstream.
    '''

    # model classes and pipeline mapping exercised by the shared mixins
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _a = True
    _a = False
    _a = False

    def snake_case ( self : Any )-> str:
        # originally setUp: create the model tester and the config tester
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )

    def snake_case ( self : Any )-> Optional[int]:
        # run the generic config sanity checks
        self.config_tester.run_common_tests()

    def snake_case ( self : int )-> str:
        # cached decoding must match the uncached forward pass
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test: 90M BlenderbotSmall generation on one prompt.

    NOTE(review): the attribute names (src_text / model_name) and the locals
    (model_inputs, generated_ids, generated_words) were lost in the bulk
    rename, so the last method's references are unbound as written.
    '''

    # single conversational prompt fed to the model
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'

    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )

    @cached_property
    def snake_case ( self : int )-> List[Any]:
        # lazily load the seq2seq model from the hub
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def snake_case ( self : Tuple )-> int:
        # generate with beam search and check the decoded reply is one of the
        # known-good variants
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''Agent tool that answers a question about a document image using a Donut
    (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.

    NOTE(review): ``tokenajson`` in the last method looks like a bulk-renamed
    identifier (presumably ``token2json``) — confirm against upstream; the
    encode/decode methods also reference locals (``task_prompt``,
    ``decoder_input_ids``, ``pixel_values``, ``sequence``) whose bindings
    were collapsed onto ``lowerCamelCase__``.
    '''

    # checkpoint and tool metadata consumed by the agent framework
    _a = 'naver-clova-ix/donut-base-finetuned-docvqa'
    _a = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    _a = 'document_qa'
    _a = AutoProcessor
    _a = VisionEncoderDecoderModel
    _a = ['image', 'text']  # input modalities
    _a = ['text']  # output modality

    def __init__( self : Union[str, Any], *lowerCamelCase : int, **lowerCamelCase : List[str] )-> Optional[int]:
        # Fail early if PIL is missing, since the tool consumes images.
        # NOTE(review): ``*lowerCamelCase``/``**lowerCamelCase`` share a name
        # (SyntaxError) — originally ``*args, **kwargs``.
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*lowerCamelCase, **lowerCamelCase )

    def snake_case ( self : Dict, lowerCamelCase : "Image", lowerCamelCase : str )-> Optional[Any]:
        # encode(): build the Donut DocVQA task prompt and the pixel inputs
        lowerCamelCase__ : List[Any] ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        lowerCamelCase__ : Dict =task_prompt.replace('''{user_input}''', lowerCamelCase )
        lowerCamelCase__ : List[Any] =self.pre_processor.tokenizer(
            lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors='''pt''' ).input_ids
        lowerCamelCase__ : Union[str, Any] =self.pre_processor(lowerCamelCase, return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def snake_case ( self : List[str], lowerCamelCase : Tuple )-> Any:
        # forward(): greedy generation with Donut's standard decoding settings
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ), decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=lowerCamelCase, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=lowerCamelCase, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=lowerCamelCase, ).sequences

    def snake_case ( self : Optional[int], lowerCamelCase : Tuple )-> str:
        # decode(): strip special tokens and the task prompt, parse to JSON,
        # and return the "answer" field
        lowerCamelCase__ : Dict =self.pre_processor.batch_decode(lowerCamelCase )[0]
        lowerCamelCase__ : Optional[int] =sequence.replace(self.pre_processor.tokenizer.eos_token, '''''' )
        lowerCamelCase__ : Dict =sequence.replace(self.pre_processor.tokenizer.pad_token, '''''' )
        lowerCamelCase__ : Optional[int] =re.sub(r'''<.*?>''', '''''', lowerCamelCase, count=1 ).strip()  # remove first task start token
        lowerCamelCase__ : int =self.pre_processor.tokenajson(lowerCamelCase )
        return sequence["answer"]
| 625 |
"""simple docstring"""
def snake_case__ ( graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int] ):
    """Return True if ``next_ver`` can extend the partial Hamiltonian ``path``.

    The candidate must be adjacent to the vertex at ``curr_ind - 1`` and must
    not already appear in ``path``. Parameter names restored from the body —
    the renamed original repeated ``__lowerCamelCase`` for all four
    parameters, which is a SyntaxError.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


# preserve the name used by the recursive helper below
valid_connection = snake_case__
def snake_case__ ( graph: list[list[int]], path: list[int], curr_ind: int ):
    """Backtracking helper: try to fill ``path[curr_ind:]`` with a Hamiltonian
    cycle; returns True when a complete cycle back to ``path[0]`` is found.

    Parameter names restored from the body — the renamed original repeated
    ``__lowerCamelCase`` (a SyntaxError).
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


# preserve the name used by the recursive call and by the entry point below
util_hamilton_cycle = snake_case__
def snake_case__ ( graph: list[list[int]], start_index: int = 0 ):
    """Return a Hamiltonian cycle through ``graph`` starting (and ending) at
    ``start_index``, or an empty list if no such cycle exists.

    NOTE(review): the original bound ``path[0] = path[-1] = start_index``;
    the renamed code lost both the list name and the endpoint assignment.
    """
    # path holds len(graph) + 1 vertices: the cycle returns to its start
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 625 | 1 |
"""simple docstring"""
import os
def snake_case__ ( ):
    """Project Euler 22: sum of (position * alphabetical score) over the
    alphabetically sorted names in ``p022_names.txt`` (letters A=1 .. Z=26).
    """
    # NOTE(review): the original opened the file relative to this module via
    # __file__; the renamed code had os.path.dirname(__lowerCamelCase).
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''', '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            # ord('A') == 65, so this maps 'A'..'Z' to 1..26
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        # reset the per-name accumulator before the next name
        name_score = 0
    return total_score


# preserve the original public name used by the __main__ guard
solution = snake_case__
if __name__ == "__main__":
    # NOTE(review): `solution` was lost in the module-wide rename; call the
    # function defined above directly.
    print(snake_case__())
| 625 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''Unit tests for the slow/fast MBart tokenizers using the tiny
    SentencePiece fixture.

    NOTE(review): a bulk rename collapsed distinct locals onto
    ``lowerCamelCase__`` and parameters onto ``lowerCamelCase``; names such
    as ``tokenizer``, ``tokens`` and ``ids`` referenced below are unbound as
    written and need their original bindings restored before these tests run.
    '''

    _a = MBartTokenizer        # slow tokenizer class under test
    _a = MBartTokenizerFast    # fast tokenizer class under test
    _a = True
    _a = True

    def snake_case ( self : Tuple )-> Union[str, Any]:
        # originally setUp: build a fixture tokenizer and save it to tmpdirname
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict )-> Union[str, Any]:
        # full tokenizer round trip: text -> tokens -> ids -> tokens, with
        # out-of-vocabulary characters mapping to <unk>
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )

    def snake_case ( self : Tuple )-> List[Any]:
        # slow and fast tokenizers must save the same files (plus
        # tokenizer.json for the fast one) and reload equivalently
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Integration tests against the real facebook/mbart-large-en-ro tokenizer.

    Fixtures: English source sentences, Romanian references, and the expected
    token ids of the first source sentence (ending with the en_XX code).
    NOTE(review): the class body continues beyond this chunk.
    '''

    _a = 'facebook/mbart-large-en-ro'  # checkpoint_name
    # English source texts
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    # expected Romanian translations
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # expected ids for src_text[0]; the final entry is the language code
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def snake_case ( cls : List[Any] )-> Optional[int]:
lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
lowerCamelCase__ : Optional[int] =1
return cls
def snake_case ( self : Optional[Any] )-> List[str]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
def snake_case ( self : Optional[int] )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
def snake_case ( self : Optional[Any] )-> str:
self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0], lowerCamelCase )
lowerCamelCase__ : Dict =10
lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-2], 2 )
self.assertEqual(ids[-1], lowerCamelCase )
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
def snake_case ( self : int )-> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : int =tempfile.mkdtemp()
lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
@require_torch
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case ( self : Optional[Any] )-> Any:
lowerCamelCase__ : str =self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case ( self : List[Any] )-> Dict:
lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
lowerCamelCase__ : Tuple =self.tokenizer(
text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def snake_case ( self : Optional[int] )-> List[Any]:
lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
'''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase ), {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
}, )
| 625 | 1 |
"""Project Euler problem 1: sum of the multiples of 3 or 5 below a limit."""


def snake_case__ ( __lowerCamelCase : int = 1000 ):
    """Return the sum of every natural number strictly below *__lowerCamelCase*
    that is a multiple of 3 or 5.

    :param __lowerCamelCase: exclusive upper bound (default 1000, the Euler value)
    :return: the required sum (0 when the bound is 3 or less)

    >>> snake_case__(10)
    23
    """
    # Fixes over the original body: the loop compared against an undefined
    # name ``n`` instead of the parameter, and carried an unreachable
    # ``elif a % 15 == 0`` branch (any multiple of 15 already matched
    # ``a % 3 == 0``). Starting the range at 3 is safe: 1 and 2 can never match.
    return sum(
        value
        for value in range(3, __lowerCamelCase)
        if value % 3 == 0 or value % 5 == 0
    )


if __name__ == "__main__":
    # The original printed an undefined name ``solution``; point the
    # self-documenting f-string at the actual function.
    print(f'{snake_case__() = }')
| 625 |
"""Reverse every word of a sentence that is longer than four characters."""


def snake_case__ ( __lowerCamelCase : str ):
    """Return *__lowerCamelCase* with each word longer than 4 characters reversed.

    Words are whitespace-separated; shorter words pass through unchanged and
    runs of whitespace collapse to single spaces (``str.split`` semantics).

    >>> snake_case__("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # Fixes over the original body: it tested len() of the whole input instead
    # of each word, and iterated an undefined name ``sentence``. The redundant
    # ``''.join(word[::-1])`` wrapper is also dropped -- a slice of a str is
    # already a str.
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in __lowerCamelCase.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original called an undefined name ``reverse_long_words``.
    print(snake_case__("Hey wollef sroirraw"))
| 625 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Tokenizer tests for CPM-Ant (requires the ``jieba`` Chinese segmenter).

    NOTE(review): mechanical renaming has damaged this class -- several
    results below are bound to throwaway locals while later lines read the
    intended attribute names (``self.vocab_file``), and the final assertions
    compare an unbound name ``lowerCamelCase`` with itself. Confirm against
    the upstream test file before trusting any assertion here.
    """

    # Tokenizer class under test, and the "has fast tokenizer" flag for the
    # common-test mixin (both bound to ``_a``; the second clobbers the first).
    _a = CpmAntTokenizer
    _a = False

    def snake_case ( self : List[Any] )-> Union[str, Any]:
        # Write a tiny fixture vocabulary file into the mixin's temp dir.
        super().setUp()
        lowerCamelCase__ : Optional[int] =[
            '''<d>''',
            '''</d>''',
            '''<s>''',
            '''</s>''',
            '''</_>''',
            '''<unk>''',
            '''<pad>''',
            '''</n>''',
            '''我''',
            '''是''',
            '''C''',
            '''P''',
            '''M''',
            '''A''',
            '''n''',
            '''t''',
        ]
        # NOTE(review): the path lands in a local, but ``self.vocab_file`` is
        # read on the next line -- looks like a broken attribute assignment.
        lowerCamelCase__ : List[Any] =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    @tooslow
    def snake_case ( self : Tuple )-> Optional[Any]:
        # End-to-end: tokenize a Chinese sentence with the pretrained
        # checkpoint, map to ids (with BOS prepended), then decode back.
        lowerCamelCase__ : Optional[int] =CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        lowerCamelCase__ : Dict ='''今天天气真好!'''
        lowerCamelCase__ : Dict =['''今天''', '''天气''', '''真''', '''好''', '''!''']
        lowerCamelCase__ : List[str] =tokenizer.tokenize(lowerCamelCase )
        self.assertListEqual(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ='''今天天气真好!'''
        lowerCamelCase__ : int =[tokenizer.bos_token] + tokens
        lowerCamelCase__ : Dict =[6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), lowerCamelCase )
        lowerCamelCase__ : str =tokenizer.decode(lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
| 625 |
"""Project Euler problem 63: count n-digit positive integers that are also
an n-th power of some base."""


def snake_case__ ( max_base : int = 10 , max_power : int = 22 ):
    """Count pairs (base, power) with ``1 <= base < max_base`` and
    ``1 <= power < max_power`` where ``base ** power`` has exactly ``power``
    decimal digits.

    :param max_base: exclusive upper bound on the base (default 10 -- bases 1..9,
        since 10**n always has n+1 digits)
    :param max_power: exclusive upper bound on the exponent (default 22 -- 9**22
        already has fewer than 22 digits, so larger powers never qualify)
    :return: the number of qualifying powers (49 for the defaults)

    >>> snake_case__(10, 22)
    49
    """
    # The original declared both parameters with the same name
    # ``__lowerCamelCase`` -- a SyntaxError -- so they are renamed here to
    # what the body actually consumes.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    # The original printed an undefined name ``solution``.
    print(f'{snake_case__(10, 22) = }')
| 625 | 1 |
"""Extract a subset of transformer layers from a full RobertaForMaskedLM or
GPT2LMHeadModel checkpoint, producing a smaller state dict to initialise a
distilled student model.

NOTE(review): mechanical renaming has destroyed this script's data flow --
every assignment target became ``_lowercase``, yet later lines read the
intended names (``parser``, ``args``, ``model``, ``prefix``, ``state_dict``,
``compressed_sd``, ``std_idx``). As written the script raises NameError;
restore the original variable names from the upstream distillation script
before running.
"""
import argparse

import torch

from transformers import GPTaLMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    # CLI: choose teacher architecture/checkpoint, output path, and whether
    # the LM-head vocab transform layers are copied as well.
    _lowercase = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    _lowercase = parser.parse_args()

    # Load the teacher and remember its parameter-name prefix.
    if args.model_type == "roberta":
        _lowercase = RobertaForMaskedLM.from_pretrained(args.model_name)
        _lowercase = "roberta"
    elif args.model_type == "gpt2":
        _lowercase = GPTaLMHeadModel.from_pretrained(args.model_name)
        _lowercase = "transformer"

    _lowercase = model.state_dict()
    _lowercase = {}

    # Embeddings #
    # Copy token/position (and, for BERT-style models, token-type) embeddings
    # plus the embedding LayerNorm into the student state dict.
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            _lowercase = state_dict[f'{prefix}.{param_name}']
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            _lowercase = f'{prefix}.embeddings.{w}.weight'
            _lowercase = state_dict[param_name]
        for w in ["weight", "bias"]:
            _lowercase = f'{prefix}.embeddings.LayerNorm.{w}'
            _lowercase = state_dict[param_name]

    # Transformer Blocks #
    # Keep every other-ish teacher layer (0, 2, 4, 7, 9, 11) and renumber them
    # consecutively in the student via ``std_idx``.
    _lowercase = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    _lowercase = state_dict[
                        f'{prefix}.h.{teacher_idx}.{layer}.{w}'
                    ]
            _lowercase = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    _lowercase = state_dict[
                        f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    # Copy the LM head; the vocab transform (dense + layer norm) only when
    # --vocab_transform was requested.
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            _lowercase = state_dict[f'{layer}']
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                _lowercase = state_dict[f'lm_head.dense.{w}']
                _lowercase = state_dict[f'lm_head.layer_norm.{w}']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            _lowercase = state_dict[f'{prefix}.ln_f.{w}']
        _lowercase = state_dict["lm_head.weight"]

    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')

    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if isinstance(__lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
    """Mixin of shared checks for Flax VisionTextDualEncoder model tests;
    concrete subclasses supply the model/config/inputs via the three stubs.

    NOTE(review): mechanical renaming has damaged this class -- several
    signatures declare the parameter ``lowerCamelCase`` more than once (a
    SyntaxError), every method is named ``snake_case`` (later defs shadow
    earlier ones), and many calls pass the same ``lowerCamelCase`` for
    logically distinct arguments. Treat the bodies as documentation of
    intent, not runnable code, until restored from upstream.
    """

    def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
        # Stub: subclasses return (vision_model, text_model) built from configs.
        pass

    def snake_case ( self : List[str] )-> List[str]:
        # Stub: subclasses return config + input dict for the checks below.
        pass

    def snake_case ( self : Optional[Any] )-> str:
        # Stub: subclasses return a pretrained model and matching inputs.
        pass

    def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
        # Assert two arrays agree element-wise within ``tol``.
        lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
        self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )

    def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
        # Build the dual encoder from sub-configs and check the projection
        # output shapes for both modalities.
        lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )

    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        # Same shape check, but constructing via from_vision_text_pretrained.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )

    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        # save_pretrained / from_pretrained round trip must reproduce the
        # first output tensor within 1e-3.
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
            lowerCamelCase__ : List[str] =after_output[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-3 )

    def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
        # With output_attentions=True, attention tensors of both towers must
        # have the expected layer count and per-layer shapes.
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[str] =model(
            input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
        lowerCamelCase__ : int =output.vision_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
        lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ : int =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
        lowerCamelCase__ : List[Any] =output.text_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
        # Cross-framework check: run the same inputs through the PyTorch and
        # Flax models and compare outputs within 4e-2, in both save/load
        # directions (PT->Flax and Flax->PT).
        pt_model.to(lowerCamelCase )
        pt_model.eval()
        # prepare inputs
        lowerCamelCase__ : Any =inputs_dict
        lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
            lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
            # PT -> Flax
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
            lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
            # Flax -> PT
            with tempfile.TemporaryDirectory() as tmpdirname:
                fx_model.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
            pt_model_loaded.to(lowerCamelCase )
            pt_model_loaded.eval()
            with torch.no_grad():
                lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )

    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
        # Convert PT weights to Flax params, then run the equivalence check.
        lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
        lowerCamelCase__ : Tuple =fx_state
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
        # Load Flax params into the PT model, then run the equivalence check.
        lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> Union[str, Any]:
        # Test entry: model built from configs.
        lowerCamelCase__ : Any =self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        # Test entry: model built from pretrained sub-models.
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )

    def snake_case ( self : Tuple )-> Any:
        # Test entry: save/load round trip.
        lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
        self.check_save_load(**lowerCamelCase )

    def snake_case ( self : str )-> Any:
        # Test entry: attention output shapes.
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCamelCase )

    @is_pt_flax_cross_test
    def snake_case ( self : Tuple )-> List[Any]:
        # Test entry: bidirectional PT/Flax weight-conversion equivalence.
        lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
        lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
        lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
        lowerCamelCase__ : Tuple =config_inputs_dict
        self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    @slow
    def snake_case ( self : Optional[Any] )-> Tuple:
        # Slow test: real pretrained checkpoint round trip within 1e-5.
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
        lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[str] =outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
            lowerCamelCase__ : List[Any] =after_outputs[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Concrete dual-encoder test case pairing a Flax ViT vision tower with a
    Flax BERT text tower; fills in the three stubs of the mixin above.

    NOTE(review): like the mixin, this class was damaged by renaming --
    results land in throwaway locals while later lines read the intended
    names (``model``, ``pixel_values`` ...); confirm against upstream.
    """

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # Build the tiny pretrained ViT+BERT dual encoder and random inputs.
        lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : List[str] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int )-> int:
        # Instantiate the two towers from their configs.
        lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase )
        lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : int )-> Optional[int]:
        # Delegate config/input creation to the per-model testers and merge.
        lowerCamelCase__ : Any =FlaxViTModelTester(self )
        lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Concrete dual-encoder test case pairing a Flax CLIP vision tower with a
    Flax BERT text tower; mirrors the ViT variant above.

    NOTE(review): same renaming damage as the sibling classes -- results land
    in throwaway locals while later lines read the intended names.
    """

    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Build the tiny pretrained CLIP+BERT dual encoder and random inputs.
        lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : Optional[Any] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict:
        # Instantiate the two towers from their configs.
        lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # Delegate config/input creation to the per-model testers and merge.
        lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
        lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: run the real ``clip-italian/clip-italian``
    checkpoint on a fixture image and pin the image/text logits.

    NOTE(review): ``lowerCamelCase`` is unbound inside the method (renaming
    damage); the intended locals are ``model`` / ``processor`` / ``image`` /
    ``inputs`` / ``outputs`` -- confirm against upstream.
    """

    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        # Load checkpoint + processor, encode two Italian captions against the
        # COCO fixture image, and verify logit shapes and values (atol 1e-3).
        lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase__ : Dict =processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )
        lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
| 625 | 0 |
"""Pascal's triangle: pretty-printer for the generated rows."""


def snake_case__ ( __lowerCamelCase : Dict ):
    """Print a centred Pascal's triangle with *__lowerCamelCase* rows.

    NOTE(review): relies on a module-level ``generate_pascal_triangle`` helper;
    in this file every helper was renamed to ``snake_case__`` during
    obfuscation, so confirm the intended name resolves before calling.

    Fixes over the original body: it referenced the undefined names ``__A``
    and ``num_rows`` where the parameter was meant.
    """
    triangle = generate_pascal_triangle(__lowerCamelCase)
    for row_idx in range(__lowerCamelCase):
        # Left-pad each row so the triangle is centred.
        for _ in range(__lowerCamelCase - row_idx - 1):
            print(end=" ")
        # Row values are space-separated except after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def snake_case__ ( __lowerCamelCase : Dict ):
"""simple docstring"""
if not isinstance(__A , __A ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase__ : Optional[int] =[]
for current_row_idx in range(__A ):
lowerCamelCase__ : Any =populate_current_row(__A , __A )
triangle.append(__A )
return triangle
def snake_case__ ( triangle : list , current_row_idx : int ):
    """Build row ``current_row_idx`` of Pascal's triangle from the rows in
    *triangle* (which must already contain rows 0..current_row_idx-1).

    :return: the new row, e.g. ``[1, 2, 1]`` for index 2

    Fixes over the original: the signature declared the same parameter name
    twice (a SyntaxError) -- the names the body actually consumed are restored
    -- and the per-element helper call (whose name no longer exists in this
    module) is inlined.
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] = current_row[-1] = 1
    # Interior elements are the sum of the two elements above them.
    for current_col_idx in range(1, current_row_idx):
        current_row[current_col_idx] = (
            triangle[current_row_idx - 1][current_col_idx - 1]
            + triangle[current_row_idx - 1][current_col_idx]
        )
    return current_row
def snake_case__ ( triangle : list , current_row : list , current_row_idx : int , current_col_idx : int ):
    """Set ``current_row[current_col_idx]`` to the sum of the two elements
    above it in *triangle* (Pascal's rule). Mutates *current_row* in place and
    returns None.

    Fixes over the original: the signature declared the same parameter name
    four times (a SyntaxError), and the computed sum was bound to a throwaway
    local instead of being written back into the row.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def snake_case__ ( __lowerCamelCase : Dict ):
"""simple docstring"""
if not isinstance(__A , __A ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase__ : Dict =[[1]]
for row_index in range(1 , __A ):
lowerCamelCase__ : Tuple =[0] + result[-1] + [0]
lowerCamelCase__ : Dict =row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase__ : Tuple =sum(divmod(__A , 2 ) )
lowerCamelCase__ : Tuple =[
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
lowerCamelCase__ : int =row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase__ : List[str] =row_first_half + row_second_half
result.append(__A )
return result
def snake_case__ ( ):
    """Crudely benchmark the two Pascal's-triangle generators with ``timeit``
    for row counts 0..14, printing one timing line per (function, size) pair.

    NOTE(review): expects module-level ``generate_pascal_triangle`` and
    ``generate_pascal_triangle_optimized`` to exist -- in this file both were
    renamed to ``snake_case__`` during obfuscation; confirm the names resolve
    before running.

    Fixes over the original: the inner helper declared the same parameter
    name twice (a SyntaxError) and was invoked with the undefined name ``__A``
    for both arguments.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build a call snippet such as "generate_pascal_triangle(14)" and time
        # it against the __main__ namespace.
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='''import __main__''')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'''{call:38} -- {timing:.4f} seconds''')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original called an undefined name ``benchmark``.
    snake_case__()
| 701 |
"""Naive recursive solution to the 0/1 knapsack problem."""


def snake_case__ ( weights : list , values : list , number_of_items : int , max_weight : int , index : int ):
    """Return the maximum total value obtainable from items
    ``index .. number_of_items - 1`` without exceeding ``max_weight``.

    :param weights: per-item weights
    :param values: per-item values (parallel to *weights*)
    :param number_of_items: total number of items considered
    :param max_weight: remaining knapsack capacity
    :param index: index of the item currently being decided
    :return: best achievable value (0 when no items remain)

    Exponential time -- fine for the small doctest-sized inputs it is used on.

    >>> snake_case__([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13

    Fixes over the original: the signature declared the same parameter name
    five times (a SyntaxError) -- the names the body actually consumed are
    restored -- and the recursive calls targeted the undefined name
    ``knapsack``.
    """
    # Base case: every item has been decided.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = snake_case__(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + snake_case__(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : List[str] ='platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __SCREAMING_SNAKE_CASE :
    """Builds a tiny Pegasus config plus encoder/decoder inputs, and checks that
    Flax incremental (cached) decoding matches a full uncached forward pass.

    NOTE(review): identifiers in this chunk look machine-obfuscated —
    ``UpperCAmelCase_`` is referenced but never defined in this file, and locals
    such as ``input_ids``/``eos_tensor`` are read without a visible binding
    (every assignment target was rewritten to ``lowerCamelCase__``).  The shape
    of the logic matches the stock ``FlaxPegasusModelTester``; verify the real
    names against upstream before relying on them.
    """

    # Config class used by the prepare method, extra per-test config overrides,
    # and the activation function of the tiny model under test.
    _a = PegasusConfig
    _a = {}
    _a = """gelu"""

    def __init__( self : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : str=13, lowerCamelCase : Dict=7, lowerCamelCase : Dict=True, lowerCamelCase : List[str]=False, lowerCamelCase : List[Any]=99, lowerCamelCase : Union[str, Any]=32, lowerCamelCase : List[str]=5, lowerCamelCase : str=4, lowerCamelCase : Any=37, lowerCamelCase : int=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : Union[str, Any]=20, lowerCamelCase : Dict=2, lowerCamelCase : Optional[Any]=1, lowerCamelCase : Tuple=0, )-> Optional[int]:
        # Store the parent TestCase and every hyper-parameter of the tiny model.
        # NOTE(review): the right-hand names (parent, batch_size, ...) have no
        # visible binding here — presumably they were the original parameter
        # names before obfuscation.
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Optional[int] =batch_size
        lowerCamelCase__ : Union[str, Any] =seq_length
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : List[str] =vocab_size
        lowerCamelCase__ : Tuple =hidden_size
        lowerCamelCase__ : Union[str, Any] =num_hidden_layers
        lowerCamelCase__ : Any =num_attention_heads
        lowerCamelCase__ : Tuple =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_dropout_prob
        lowerCamelCase__ : List[str] =attention_probs_dropout_prob
        lowerCamelCase__ : Tuple =max_position_embeddings
        lowerCamelCase__ : Union[str, Any] =eos_token_id
        lowerCamelCase__ : Union[str, Any] =pad_token_id
        lowerCamelCase__ : List[Any] =bos_token_id

    def snake_case ( self : int )-> Any:
        """Create a small random config and a matching inputs dict."""
        # Random encoder tokens clipped away from special ids, EOS appended as
        # the final column of every row.
        lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ).clip(3, self.vocab_size )
        lowerCamelCase__ : Any =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =np.concatenate([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : str =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : List[str] =prepare_pegasus_inputs_dict(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
        return config, inputs_dict

    def snake_case ( self : Optional[int], lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : str )-> str:
        """Assert that decoding one token at a time with an ``init_cache`` past
        produces (numerically, to 1e-3) the same logits as uncached decoding."""
        lowerCamelCase__ : List[Any] =20
        lowerCamelCase__ : int =model_class_name(UpperCAmelCase_ )
        lowerCamelCase__ : Union[str, Any] =model.encode(inputs_dict['''input_ids'''] )
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =(
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        # Prime the cache with all tokens but the last, then decode the final
        # token against the cached past.
        lowerCamelCase__ : Dict =model.init_cache(decoder_input_ids.shape[0], UpperCAmelCase_, UpperCAmelCase_ )
        lowerCamelCase__ : Any =jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
        lowerCamelCase__ : Dict =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        lowerCamelCase__ : Optional[Any] =model.decode(
            decoder_input_ids[:, :-1], UpperCAmelCase_, decoder_attention_mask=UpperCAmelCase_, past_key_values=UpperCAmelCase_, decoder_position_ids=UpperCAmelCase_, )
        lowerCamelCase__ : Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
        lowerCamelCase__ : Union[str, Any] =model.decode(
            decoder_input_ids[:, -1:], UpperCAmelCase_, decoder_attention_mask=UpperCAmelCase_, past_key_values=outputs_cache.past_key_values, decoder_position_ids=UpperCAmelCase_, )
        lowerCamelCase__ : Dict =model.decode(UpperCAmelCase_, UpperCAmelCase_ )
        # Compare only the last position's first 5 logits to bound the check.
        lowerCamelCase__ : Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )

    def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : List[Any] )-> Dict:
        """Same cached-vs-uncached check as above, but with an explicit decoder
        attention mask padded out to the maximum decoder length."""
        lowerCamelCase__ : Any =20
        lowerCamelCase__ : List[Any] =model_class_name(UpperCAmelCase_ )
        lowerCamelCase__ : Dict =model.encode(inputs_dict['''input_ids'''] )
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =(
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        # Extend the decoder mask with zeros up to max_decoder_length so the
        # cached positions beyond the prompt are masked out.
        lowerCamelCase__ : Any =jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ], axis=-1, )
        lowerCamelCase__ : Any =model.init_cache(decoder_input_ids.shape[0], UpperCAmelCase_, UpperCAmelCase_ )
        lowerCamelCase__ : Tuple =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        lowerCamelCase__ : Optional[Any] =model.decode(
            decoder_input_ids[:, :-1], UpperCAmelCase_, decoder_attention_mask=UpperCAmelCase_, past_key_values=UpperCAmelCase_, decoder_position_ids=UpperCAmelCase_, )
        lowerCamelCase__ : int =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
        lowerCamelCase__ : List[Any] =model.decode(
            decoder_input_ids[:, -1:], UpperCAmelCase_, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=UpperCAmelCase_, decoder_position_ids=UpperCAmelCase_, )
        lowerCamelCase__ : Union[str, Any] =model.decode(UpperCAmelCase_, UpperCAmelCase_, decoder_attention_mask=UpperCAmelCase_ )
        lowerCamelCase__ : int =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
def snake_case__(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Assemble the kwargs dict fed to the Flax Pegasus model under test.

    Derives padding-based masks when they are not supplied:
    - ``attention_mask``: 1 wherever ``input_ids`` differs from the pad token.
    - ``decoder_attention_mask``: first position always 1 (decoder start token
      equals the pad token here), remaining positions 1 where not padding.

    The original, obfuscated version was not runnable: all five parameters
    shared the name ``__lowerCamelCase`` (a SyntaxError), the body read an
    undefined ``_SCREAMING_SNAKE_CASE``, and it used the nonexistent
    ``np.inta`` (intended: ``np.int8``).

    :param config: model config; only ``config.pad_token_id`` is read.
    :param input_ids: ``(batch, src_len)`` int array of encoder tokens.
    :param decoder_input_ids: ``(batch, tgt_len)`` int array of decoder tokens.
    :param attention_mask: optional precomputed encoder mask.
    :param decoder_attention_mask: optional precomputed decoder mask.
    :return: dict with the four model inputs.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class __SCREAMING_SNAKE_CASE ( snake_case__ , unittest.TestCase ):
    """Test suite for the Flax Pegasus models: config sanity, cached decoding,
    jitted encode/decode shape equality, and slow integration/summarization
    checks against the released ``google/pegasus-*`` checkpoints.

    NOTE(review): this chunk is machine-obfuscated — ``UpperCAmelCase_`` (used
    throughout as an argument) is never defined, and the mixin base is the
    obfuscated ``snake_case__``; upstream this is FlaxPegasusModelTest with
    FlaxGenerationTesterMixin/FlaxModelTesterMixin bases.
    """

    # Model classes exercised by the common tests; generation tests only run
    # the conditional-generation head.
    _a = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    _a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    # Common-test feature flags (presumably is_encoder_decoder / test_pruning
    # style switches — names lost to obfuscation).
    _a = True
    _a = False
    _a = False
    _a = False

    def snake_case ( self : Tuple )-> int:
        # Standard setUp: build the model tester and a ConfigTester.
        lowerCamelCase__ : List[Any] =FlaxPegasusModelTester(self )
        lowerCamelCase__ : str =ConfigTester(self, config_class=UpperCAmelCase_ )

    def snake_case ( self : Union[str, Any] )-> Optional[Any]:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def snake_case ( self : Tuple )-> Dict:
        # Cached decoding must match uncached decoding for every model class.
        lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )

    def snake_case ( self : Optional[int] )-> List[Any]:
        # Same as above, with an explicit decoder attention mask.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )

    def snake_case ( self : Any )-> List[Any]:
        # Jitted and non-jitted encode must agree in output count and shapes.
        lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase__ : str =self._prepare_for_class(UpperCAmelCase_, UpperCAmelCase_ )
                lowerCamelCase__ : str =model_class(UpperCAmelCase_ )

                @jax.jit
                def encode_jitted(lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any]=None, **lowerCamelCase : Optional[Any] ):
                    return model.encode(input_ids=UpperCAmelCase_, attention_mask=UpperCAmelCase_ )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase__ : Dict =encode_jitted(**UpperCAmelCase_ ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase__ : List[Any] =encode_jitted(**UpperCAmelCase_ ).to_tuple()
                self.assertEqual(len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) )
                for jitted_output, output in zip(UpperCAmelCase_, UpperCAmelCase_ ):
                    self.assertEqual(jitted_output.shape, output.shape )

    def snake_case ( self : List[Any] )-> Any:
        # Jitted and non-jitted decode must agree in output count and shapes.
        lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase__ : int =model_class(UpperCAmelCase_ )
                lowerCamelCase__ : int =model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
                lowerCamelCase__ : Union[str, Any] ={
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : List[Any] ):
                    return model.decode(
                        decoder_input_ids=UpperCAmelCase_, decoder_attention_mask=UpperCAmelCase_, encoder_outputs=UpperCAmelCase_, )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase__ : List[str] =decode_jitted(**UpperCAmelCase_ ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase__ : Optional[int] =decode_jitted(**UpperCAmelCase_ ).to_tuple()
                self.assertEqual(len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) )
                for jitted_output, output in zip(UpperCAmelCase_, UpperCAmelCase_ ):
                    self.assertEqual(jitted_output.shape, output.shape )

    @slow
    def snake_case ( self : Any )-> List[str]:
        # Smoke test: each model class loads the released PT checkpoint and
        # runs a minimal forward pass.
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : Any =model_class_name.from_pretrained('''google/pegasus-large''', from_pt=UpperCAmelCase_ )
            lowerCamelCase__ : Tuple =np.ones((1, 1) )
            lowerCamelCase__ : Optional[Any] =model(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )

    @slow
    def snake_case ( self : Dict )-> Tuple:
        # Integration test: beam-search summaries from pegasus-xsum must match
        # the reference summaries exactly.
        lowerCamelCase__ : Union[str, Any] =FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
        lowerCamelCase__ : str =PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
        lowerCamelCase__ : List[str] =[
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ''',
        ]
        lowerCamelCase__ : int =[
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]
        lowerCamelCase__ : Tuple =tokenizer(UpperCAmelCase_, return_tensors='''np''', truncation=UpperCAmelCase_, max_length=512, padding=UpperCAmelCase_ )
        lowerCamelCase__ : Optional[Any] =model.generate(**UpperCAmelCase_, num_beams=2 ).sequences
        lowerCamelCase__ : Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_, skip_special_tokens=UpperCAmelCase_ )
        assert tgt_text == decoded
| 702 |
"""simple docstring"""
_lowercase : Optional[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : int =int(__lowerCamelCase )
assert noofclusters < len(__lowerCamelCase )
# Find out the dimensionality
lowerCamelCase__ : List[Any] =len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCamelCase__ : List[str] =list(range(len(__lowerCamelCase ) ) )
shuffle(__lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCamelCase__ : Union[str, Any] =tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCamelCase__ : Optional[Any] =tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCamelCase__ : int =[
tf.Variable(vectors[vector_indices[i]] ) for i in range(__lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCamelCase__ : Any =tf.placeholder('''float64''' , [dim] )
lowerCamelCase__ : Tuple =[]
for centroid in centroids:
cent_assigns.append(tf.assign(__lowerCamelCase , __lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowerCamelCase__ : str =[tf.Variable(0 ) for i in range(len(__lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCamelCase__ : str =tf.placeholder('''int32''' )
lowerCamelCase__ : Any =[]
for assignment in assignments:
cluster_assigns.append(tf.assign(__lowerCamelCase , __lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowerCamelCase__ : Optional[int] =tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCamelCase__ : List[str] =tf.reduce_mean(__lowerCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCamelCase__ : Optional[int] =tf.placeholder('''float''' , [dim] )
lowerCamelCase__ : Union[str, Any] =tf.placeholder('''float''' , [dim] )
lowerCamelCase__ : Optional[Any] =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__lowerCamelCase , __lowerCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCamelCase__ : Dict =tf.placeholder('''float''' , [noofclusters] )
lowerCamelCase__ : Any =tf.argmin(__lowerCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCamelCase__ : Optional[Any] =tf.initialize_all_variables()
# Initialize all variables
sess.run(__lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCamelCase__ : Union[str, Any] =100
for _ in range(__lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__lowerCamelCase ) ):
lowerCamelCase__ : Optional[int] =vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowerCamelCase__ : Tuple =[
sess.run(__lowerCamelCase , feed_dict={va: vect, va: sess.run(__lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCamelCase__ : Optional[Any] =sess.run(
__lowerCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__lowerCamelCase ):
# Collect all the vectors assigned to this cluster
lowerCamelCase__ : Dict =[
vectors[i]
for i in range(len(__lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCamelCase__ : List[str] =sess.run(
__lowerCamelCase , feed_dict={mean_input: array(__lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCamelCase__ : int =sess.run(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =sess.run(__lowerCamelCase )
return centroids, assignments
| 703 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[int] ):
"""simple docstring"""
if not numbers:
return 0
if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
lowerCamelCase__ : Any =numbers[0]
for i in range(1 , len(__lowerCamelCase ) ):
# update the maximum and minimum subarray products
lowerCamelCase__ : Dict =numbers[i]
if number < 0:
lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now
lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number )
lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number )
# update the maximum product found till now
lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase )
return max_prod
| 625 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Tuple, *lowerCamelCase : str, **lowerCamelCase : Any )-> None:
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''', UpperCamelCase_, )
super().__init__(*UpperCamelCase_, **UpperCamelCase_ )
| 704 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Output container for the Flax ControlNet forward pass.

    NOTE(review): this chunk is machine-obfuscated — the base ``lowerCAmelCase_``
    is undefined here (presumably the imported ``BaseOutput``), and the two
    fields below were flattened to ``_a = 42``; upstream they are the
    ``jnp.ndarray`` fields ``down_block_res_samples`` and
    ``mid_block_res_sample`` (the keywords used when this class is constructed
    at the end of ``__call__``).  Confirm against upstream before editing.
    """

    # Residual samples from the down blocks, and the mid-block residual.
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax module that embeds the ControlNet conditioning image: a conv-in,
    a stack of stride-1/stride-2 conv pairs that downsample while widening the
    channel count, and a zero-initialized conv-out.

    NOTE(review): field names/annotations are obfuscated (``_a = 42`` etc.);
    by position they correspond to ``conditioning_embedding_channels``, the
    per-stage ``block_out_channels`` tuple, and the compute ``dtype``
    (``jnp.floataa`` is presumably a mangled ``jnp.float32``) — confirm
    against upstream.
    """

    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa

    def snake_case ( self : Tuple )-> int:
        # Flax ``setup``: build conv-in, the downsampling block list, and the
        # zero-init conv-out (zero init keeps the ControlNet branch inert at
        # the start of training).
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            # Stride-1 conv keeps resolution; the following stride-2 conv halves it.
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        """Embed the conditioning image: conv-in, SiLU-separated conv stack, conv-out."""
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Flax ControlNet: a UNet-encoder-shaped network that consumes a noisy
    sample, timestep, text-encoder hidden states, and a conditioning image,
    and produces per-resolution residuals (plus a mid-block residual) that a
    host UNet adds to its own activations.

    NOTE(review): heavily obfuscated — the two mixin bases (``lowerCAmelCase_``)
    are undefined here (presumably ``FlaxModelMixin``/``ConfigMixin`` from the
    imports above), all config fields were flattened to ``_a = ...``, and many
    right-hand-side names have no visible binding.  Code is left byte-identical;
    comments below describe what the visible structure does.
    """

    # Config fields, by position: sample_size, in_channels, down_block_types,
    # only_cross_attention, block_out_channels, layers_per_block,
    # attention_head_dim, num_attention_heads, cross_attention_dim, dropout,
    # use_linear_projection, dtype, flip_sin_to_cos, freq_shift,
    # controlnet_conditioning_channel_order, conditioning_embedding_out_channels
    # — presumed from the usages below; confirm against upstream.
    _a = 3_2
    _a = 4
    _a = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _a = 2
    _a = 8
    _a = None
    _a = 1_2_8_0
    _a = 0.0
    _a = False
    _a = jnp.floataa
    _a = True
    _a = 0
    _a = "rgb"
    _a = (1_6, 3_2, 9_6, 2_5_6)

    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        """Initialize model parameters from dummy zero/one inputs."""
        # init input tensors
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution (VAE downscale factor).
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]

    def snake_case ( self : Any )-> Tuple:
        """Flax ``setup``: build conv-in, time embedding, conditioning embedding,
        the down-block stack, their zero-init ControlNet convs, and the mid block."""
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        # Broadcast scalar config flags to one entry per down block.
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        # Every ControlNet side conv is zero-initialized so the branch starts
        # as an identity (contributes nothing until trained).
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )
            # One zero-init side conv per residual produced by the block.
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            if not is_final_block:
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks
        # mid
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        """Forward pass: returns scaled down-block and mid-block residuals
        (as a FlaxControlNetOutput, or a tuple when ``return_dict`` is False)."""
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Flip the channel axis so the embedding always sees RGB order.
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
        # 1. time
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
        # 2. pre-process
        # NCHW -> NHWC for Flax convs, then add the conditioning embedding.
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
        # 5. contronet blocks
        # Pass each residual through its zero-init 1x1 side conv.
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowercase : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    """CLIP-style image processor.

    Pipeline (each stage individually switchable): convert to RGB -> resize so
    the shortest edge matches ``size`` -> center-crop to ``crop_size`` ->
    rescale pixel values -> normalize -> convert to the requested channel
    layout.

    NOTE(review): the previous version of this block was machine-mangled —
    every parameter of every method shared the single name ``lowerCamelCase``
    (a SyntaxError: duplicate argument names), five methods all shared the
    name ``snake_case`` (shadowing each other), and the base class was the
    undefined name ``lowercase__``.  Unique conventional names and the usual
    CLIP ``get_size_dict`` flags are restored here; confirm against the
    upstream processor.
    """

    # Keys this processor emits in its BatchFeature.
    _a = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # `size` is a shortest-edge spec; `crop_size` is a fixed box.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* so its shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Module-level `resize` from image_transforms does the actual work.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full processing pipeline over one image or a list of images.

        Every ``None`` argument falls back to the value captured at
        construction time.  Returns a :class:`BatchFeature` holding
        ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the CLIP sub-package: framework-specific modules
# are only imported when first accessed.
#
# NOTE(review): the previous version bound every optional sub-list to the same
# mangled name `_lowercase`, clobbering earlier entries, and the final
# `_LazyModule(...)` call referenced `_import_structure`, which was never
# defined — importing the package raised NameError.  Restored the
# conventional keyed-dict pattern.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    """Print every permutation of *sequence*, one per line, via backtracking.

    NOTE(review): previously both functions in this block were named
    ``snake_case__`` (the second definition shadowed the first) and the calls
    targeted the undefined names ``create_state_space_tree`` /
    ``generate_all_permutations``; restored the intended names.
    """
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))])


def create_state_space_tree(
    sequence: list,
    current_sequence: list,
    index: int,
    index_used: list,
) -> None:
    """Depth-first backtracking: extend *current_sequence* with each unused
    element, recurse, then undo the choice.

    Prints *current_sequence* whenever a full permutation (length ==
    len(sequence)) is reached.
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Undo the choice so the next branch starts from a clean state.
            current_sequence.pop()
            index_used[i] = False


if __name__ == "__main__":
    # Demo runs are guarded so importing this module stays side-effect free.
    sequence = [3, 1, 2, 4]
    generate_all_permutations(sequence)

    sequence_a = ["A", "B", "C"]
    generate_all_permutations(sequence_a)
| 706 |
"""simple docstring"""
import os
def solution() -> int:
    """Project Euler 22: sum of (alphabetical rank x name score) over all names.

    Reads ``p022_names.txt`` from the directory containing this script, sorts
    the names alphabetically, scores each as the sum of letter positions
    (A=1 ... Z=26; names are assumed uppercase ASCII), and weights each score
    by the name's 1-based rank.

    NOTE(review): previously the path was built from the undefined mangled
    name ``__lowerCamelCase`` instead of ``__file__`` (a NameError), and the
    ``__main__`` guard called the undefined name ``solution``.  ``abspath``
    guards against an empty ``dirname`` when run via a bare relative path.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, "p022_names.txt")) as file:
        names = file.readlines()[0]
    names = names.replace('"', "").split(",")
    names.sort()
    total_score = 0
    for rank, name in enumerate(names, start=1):
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += rank * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
| 625 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] = logging.get_logger(__name__)
def snake_case__ ( __lowerCamelCase : str ):
    """Build a YolosConfig preset for the named checkpoint variant.

    NOTE(review): this block appears machine-mangled — the branch conditions
    read the undefined name ``yolos_name`` (the parameter is
    ``__lowerCamelCase``), every per-variant assignment binds a throwaway
    local named ``lowerCamelCase__`` instead of setting an attribute on
    ``config`` (presumably ``config.hidden_size`` etc. — TODO confirm against
    the upstream conversion script), and the label-mapping lines reference the
    undefined name ``idalabel``.  Left byte-identical pending recovery of the
    original assignment targets.
    """
    lowerCamelCase__ : Optional[int] =YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        lowerCamelCase__ : Any =192
        lowerCamelCase__ : Tuple =768
        lowerCamelCase__ : Dict =12
        lowerCamelCase__ : Optional[Any] =3
        lowerCamelCase__ : Any =[800, 1333]
        lowerCamelCase__ : Optional[int] =False
    elif yolos_name == "yolos_s_dWr":
        lowerCamelCase__ : Any =330
        lowerCamelCase__ : int =14
        lowerCamelCase__ : Dict =6
        lowerCamelCase__ : Optional[int] =1320
    elif "yolos_s" in yolos_name:
        lowerCamelCase__ : List[str] =384
        lowerCamelCase__ : List[Any] =1536
        lowerCamelCase__ : Dict =12
        lowerCamelCase__ : Any =6
    elif "yolos_b" in yolos_name:
        lowerCamelCase__ : Any =[800, 1344]
    # COCO-detection label mapping downloaded from the HF hub.
    lowerCamelCase__ : Optional[Any] =91
    lowerCamelCase__ : Optional[int] ='huggingface/label-files'
    lowerCamelCase__ : Tuple ='coco-detection-id2label.json'
    lowerCamelCase__ : Dict =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
    lowerCamelCase__ : List[str] ={int(__lowerCamelCase ): v for k, v in idalabel.items()}
    lowerCamelCase__ : Union[str, Any] =idalabel
    lowerCamelCase__ : List[str] ={v: k for k, v in idalabel.items()}
    # NOTE(review): `config` is never populated above — see docstring.
    return config
def snake_case__ ( __lowerCamelCase : dict , __lowerCamelCase : YolosConfig , __lowerCamelCase : bool = False ):
    """Split each fused timm qkv projection into separate q/k/v tensors.

    NOTE(review): machine-mangled — the three parameters share one name
    (``__lowerCamelCase``), which is a SyntaxError (duplicate argument
    names); the loop reads the undefined names ``state_dict``/``config``/
    ``in_proj_weight``/``in_proj_bias``; and the sliced query/key/value
    tensors are bound to throwaway locals instead of being written back into
    the state dict under HF key names.  Left byte-identical pending recovery
    of the original assignment targets.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCamelCase__ : Any =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        lowerCamelCase__ : str =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase__ : List[Any] =in_proj_weight[: config.hidden_size, :]
        lowerCamelCase__ : Any =in_proj_bias[: config.hidden_size]
        lowerCamelCase__ : Tuple =in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCamelCase__ : Tuple =in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCamelCase__ : List[Any] =in_proj_weight[-config.hidden_size :, :]
        lowerCamelCase__ : Tuple =in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map an original/timm YOLOS parameter name to its HF Transformers name.

    Rewrites are applied in order, so earlier substitutions (e.g.
    ``backbone`` -> ``vit``) feed into later ones (e.g. ``vit.norm``).

    NOTE(review): previously every rewritten value was bound to a throwaway
    local (``lowerCamelCase__``) instead of rebinding ``name``, so the
    function returned its input unchanged; it was also named ``snake_case__``
    and immediately shadowed by the next definition.
    """
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def snake_case__ ( __lowerCamelCase : dict , __lowerCamelCase : YolosForObjectDetection ):
    """Rename/split every entry of an original YOLOS state dict for HF.

    NOTE(review): machine-mangled — the two parameters share one name (a
    SyntaxError: duplicate argument names); the loop body reads the undefined
    names ``orig_state_dict``, ``key_split``, ``val``, ``model``, ``dim``;
    and the sliced q/k/v tensors are bound to throwaway locals instead of
    keys of the output state dict.  Left byte-identical pending recovery of
    the original assignment targets.
    """
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : List[str] =orig_state_dict.pop(__lowerCamelCase )
        if "qkv" in key:
            # Fused qkv matrices are split into separate query/key/value parts.
            lowerCamelCase__ : Optional[Any] =key.split('''.''' )
            lowerCamelCase__ : Tuple =int(key_split[2] )
            lowerCamelCase__ : Dict =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowerCamelCase__ : str =val[:dim, :]
                lowerCamelCase__ : Optional[Any] =val[
                    dim : dim * 2, :
                ]
                lowerCamelCase__ : List[str] =val[-dim:, :]
            else:
                lowerCamelCase__ : Tuple =val[:dim]
                lowerCamelCase__ : Union[str, Any] =val[dim : dim * 2]
                lowerCamelCase__ : Union[str, Any] =val[-dim:]
        else:
            lowerCamelCase__ : str =val
    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO test image (two cats on a couch).

    Used by the checkpoint converter to sanity-check model outputs.

    NOTE(review): previously the URL and ``stream=`` arguments to
    ``requests.get`` were the undefined mangled name ``__lowerCamelCase``
    (a NameError), and the function was named ``snake_case__`` although the
    converter calls ``prepare_img()``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the response body.
    return Image.open(requests.get(url, stream=True).raw)
@torch.no_grad()
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ):
    """Convert an original YOLOS checkpoint to HF format, verify outputs on a
    test image, save the model + image processor, and optionally push to hub.

    NOTE(review): machine-mangled — the four parameters share one name (a
    SyntaxError: duplicate argument names); nearly every assignment target
    was replaced by a throwaway local, so ``model``, ``state_dict``,
    ``image_processor``, ``outputs``, ``logits``, ``pred_boxes``,
    ``yolos_name``, ``pytorch_dump_folder_path``, ``model_mapping`` and the
    ``expected_slice*`` names used below are undefined; and the tuple unpack
    of ``outputs.logits, outputs.pred_boxes`` was collapsed onto a single
    target.  Left byte-identical pending recovery of the original targets.
    """
    lowerCamelCase__ : Tuple =get_yolos_config(__lowerCamelCase )
    # load original state_dict
    lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )['model']
    # load 🤗 model
    lowerCamelCase__ : Tuple =YolosForObjectDetection(__lowerCamelCase )
    model.eval()
    lowerCamelCase__ : Tuple =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    # Check outputs on an image, prepared by YolosImageProcessor
    lowerCamelCase__ : Optional[Any] =800 if yolos_name != 'yolos_ti' else 512
    lowerCamelCase__ : Any =YolosImageProcessor(format='''coco_detection''' , size=__lowerCamelCase )
    lowerCamelCase__ : Any =image_processor(images=prepare_img() , return_tensors='''pt''' )
    lowerCamelCase__ : int =model(**__lowerCamelCase )
    lowerCamelCase__ : str =outputs.logits, outputs.pred_boxes
    lowerCamelCase__ : int =None, None
    # Per-variant expected logits / box slices used as a conversion check.
    if yolos_name == "yolos_ti":
        lowerCamelCase__ : Tuple =torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        lowerCamelCase__ : Tuple =torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
    elif yolos_name == "yolos_s_200_pre":
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
    elif yolos_name == "yolos_s_300_pre":
        lowerCamelCase__ : Union[str, Any] =torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        lowerCamelCase__ : List[Any] =torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
    elif yolos_name == "yolos_s_dWr":
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        lowerCamelCase__ : List[Any] =torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
    elif yolos_name == "yolos_base":
        lowerCamelCase__ : Dict =torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        lowerCamelCase__ : Optional[Any] =torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
    else:
        raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
    print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowerCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        lowerCamelCase__ : List[str] ={
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('''Pushing to the hub...''' )
        lowerCamelCase__ : Optional[Any] =model_mapping[yolos_name]
        image_processor.push_to_hub(__lowerCamelCase , organization='''hustvl''' )
        model.push_to_hub(__lowerCamelCase , organization='''hustvl''' )
if __name__ == "__main__":
    # CLI entry point: convert an original YOLOS checkpoint to HF format.
    # NOTE(review): previously both the parser and the parsed args were bound
    # to the mangled name `_lowercase`, so the subsequent
    # `parser.add_argument(...)` and `args.*` references raised NameError;
    # restored the conventional `parser` / `args` names.  (The target
    # function in this file is still mangled to `snake_case__` — the call
    # below is kept as written.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and two child links."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None  # left child Node, or None
        self.right = None  # right child Node, or None


class BinaryTreeNodeSum:
    """Iterable wrapper that yields the sum of all node values in a tree.

    NOTE(review): previously both classes in this block were named
    ``__SCREAMING_SNAKE_CASE``, so the second definition shadowed the first
    and the node class was unreachable; restored distinct names.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node) -> int:
        """Recursively sum the subtree rooted at *node* (0 for an empty subtree)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        # Yields a single value: the total of the whole tree.
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

# Names of the on-disk vocabulary files used by the slow LED tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
# NOTE(review): these constants (and the logger) were all previously assigned
# to the same mangled name `_lowercase`, so each overwrote the last and the
# tokenizer class's references to VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES /
# `logger` raised NameError; restored the conventional names.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    Printable latin-1 bytes map to themselves; the remaining (whitespace /
    control) bytes map to code points starting at 256, so BPE vocabularies
    never contain characters the tokenizer would trip over.

    NOTE(review): previously the working copy and counter were bound to
    mangled throwaway names, leaving ``cs`` and ``n`` undefined (NameError),
    and the function was named ``snake_case__`` while the tokenizer calls
    ``bytes_to_unicode()``; both restored.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a sequence of symbols).

    E.g. ``("h", "e", "l", "l", "o")`` -> ``{("h","e"), ("e","l"), ("l","l"), ("l","o")}``.

    NOTE(review): previously the accumulator set and the running previous
    symbol were bound to mangled throwaway names, so ``pairs.add(...)``,
    ``prev_char`` and the final ``return pairs`` all hit undefined names; the
    function was also named ``snake_case__`` while the tokenizer's BPE loop
    calls ``get_pairs``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __SCREAMING_SNAKE_CASE ( __A ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : Tuple, lowerCamelCase : int, lowerCamelCase : str, lowerCamelCase : str="replace", lowerCamelCase : int="<s>", lowerCamelCase : Optional[int]="</s>", lowerCamelCase : Optional[int]="</s>", lowerCamelCase : List[Any]="<s>", lowerCamelCase : str="<unk>", lowerCamelCase : Dict="<pad>", lowerCamelCase : Union[str, Any]="<mask>", lowerCamelCase : str=False, **lowerCamelCase : int, )-> Dict:
lowerCamelCase__ : int =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else bos_token
lowerCamelCase__ : Union[str, Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else eos_token
lowerCamelCase__ : str =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else sep_token
lowerCamelCase__ : Optional[int] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else cls_token
lowerCamelCase__ : Union[str, Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else unk_token
lowerCamelCase__ : List[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : Any =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, add_prefix_space=lowerCamelCase, **lowerCamelCase, )
with open(lowerCamelCase, encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase__ : str =json.load(lowerCamelCase )
lowerCamelCase__ : Optional[int] ={v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Optional[int] =errors # how to handle errors in decoding
lowerCamelCase__ : Tuple =bytes_to_unicode()
lowerCamelCase__ : int ={v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase, encoding='''utf-8''' ) as merges_handle:
lowerCamelCase__ : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase__ : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Optional[int] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowerCamelCase__ : Optional[int] ={}
lowerCamelCase__ : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : str =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case ( self : Union[str, Any] )-> Optional[int]:
return len(self.encoder )
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
return dict(self.encoder, **self.added_tokens_encoder )
def snake_case ( self : int, lowerCamelCase : str )-> Optional[Any]:
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : List[str] =tuple(lowerCamelCase )
lowerCamelCase__ : List[str] =get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : Tuple =min(lowerCamelCase, key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ : Optional[int] =bigram
lowerCamelCase__ : Union[str, Any] =[]
lowerCamelCase__ : Optional[Any] =0
while i < len(lowerCamelCase ):
try:
lowerCamelCase__ : Dict =word.index(lowerCamelCase, lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : Optional[int] =j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : List[str] =tuple(lowerCamelCase )
lowerCamelCase__ : str =new_word
if len(lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : Optional[Any] =get_pairs(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =''' '''.join(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =word
return word
def snake_case ( self : int, lowerCamelCase : Optional[Any] )-> Union[str, Any]:
lowerCamelCase__ : Dict =[]
for token in re.findall(self.pat, lowerCamelCase ):
lowerCamelCase__ : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def snake_case ( self : Union[str, Any], lowerCamelCase : Union[str, Any] )-> List[str]:
return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token ) )
def snake_case ( self : Dict, lowerCamelCase : Optional[int] )-> Any:
return self.decoder.get(lowerCamelCase )
def snake_case ( self : List[str], lowerCamelCase : List[Any] )-> Union[str, Any]:
lowerCamelCase__ : str =''''''.join(lowerCamelCase )
lowerCamelCase__ : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
return text
def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Optional[Any] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : List[Any] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCamelCase, ensure_ascii=lowerCamelCase ) + '''\n''' )
lowerCamelCase__ : List[str] =0
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase__ : Any =token_index
writer.write(''' '''.join(lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case ( self : str, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Optional[int] =[self.cls_token_id]
lowerCamelCase__ : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Optional[int], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def snake_case ( self : Any, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
lowerCamelCase__ : Dict =[self.sep_token_id]
lowerCamelCase__ : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self : Optional[int], lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int]=False, **lowerCamelCase : Optional[Any] )-> str:
lowerCamelCase__ : Tuple =kwargs.pop('''add_prefix_space''', self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Union[str, Any] =''' ''' + text
return (text, kwargs)
def snake_case ( self : Any, lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding], lowerCamelCase : Optional[int] = None, lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, )-> dict:
lowerCamelCase__ : Optional[int] =super()._pad(
encoded_inputs=lowerCamelCase, max_length=lowerCamelCase, padding_strategy=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : str =len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Tuple =len(lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 708 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def snake_case__ ( model : Any , dirpath : str ):
    """Save `model` into `dirpath`, removing stale config/weights first if the directory exists."""
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , '''config.json''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''config.json''' ) ):
            os.remove(os.path.join(dirpath , '''config.json''' ) )
        if os.path.exists(os.path.join(dirpath , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(dirpath , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def snake_case__ ( p : Tuple , unlogit : Dict=False ):
    """Shannon entropy of `p` along the last dim; if `unlogit`, square `p` first."""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    # log(0) gives -inf; zero-probability entries must contribute 0 to the entropy.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def snake_case__ ( tensor : Any ):
    """Log a 2D (layers x heads) tensor as tab-separated rows via the module logger."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        # Integer tensors are printed as ints, everything else with 5 decimals.
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def snake_case__ ( args : int , model : Optional[int] , eval_dataloader : List[Any] , compute_entropy : Any=True , compute_importance : List[Any]=True , head_mask : List[str]=None , actually_pruned : Tuple=False ):
    """Run `model` over `eval_dataloader`, accumulating per-head attention entropy and
    head-importance scores (|gradient of the loss w.r.t. the head mask|).

    Returns (attn_entropy, head_importance, total_loss).
    """
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids ,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(head_importance )
    logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    # Scatter rank indices so that the most important head gets rank 0.
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def snake_case__ ( args : List[str] , model : Tuple , eval_dataloader : int ):
    """Iteratively zero out the least-important heads until the score falls below
    `masking_threshold * original_score`; returns the resulting head mask."""
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def snake_case__ ( args : Union[str, Any] , model : List[Any] , eval_dataloader : str , head_mask : Optional[Any] ):
    """Physically prune the heads zeroed in `head_mask`, compare score and speed
    before/after pruning, then save the pruned model."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    # Map layer index -> list of head indices whose mask entry is 0.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        # squeeze() collapses a single pruned head to a bare int; re-wrap it.
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , score_masking , score_pruning )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main():
    """Entry point: compute head entropy/importance for a GPT-2 LM and optionally
    mask and prune attention heads.

    Renamed from the obfuscated placeholder so the module-level ``main()`` call
    resolves; argparse ``type``/``default`` placeholders restored to real values.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('''cuda''' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
# Script entry point: only runs when the module is executed directly, not on import.
if __name__ == "__main__":
    main()
| 625 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowercase : Any = logging.get_logger(__name__)
def make_batched(videos):
    """Normalize video input to a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos. Renamed from
    the obfuscated placeholder so the ``make_batched`` call site resolves, and the
    mismatched parameter name (NameError) is fixed.
    """
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''Video frame processor: optionally resizes, center-crops, rescales (with an
    optional offset) and normalizes every frame of every video, returning a
    BatchFeature keyed by ``pixel_values``.

    NOTE(review): signatures in this block were mangled by obfuscation — several
    parameters repeat the name ``lowerCamelCase`` while bodies reference
    ``_lowerCAmelCase`` and assign to throwaway ``lowerCamelCase__`` names. The
    comments below describe intent inferred from the bodies; confirm against the
    pristine implementation before relying on them.
    '''
    # Output keys produced by preprocessing.
    _a = ['pixel_values']
    # Stores the default preprocessing configuration (resize/crop sizes, rescale
    # factor + offset, normalization statistics).
    def __init__( self : Optional[int], lowerCamelCase : int = True, lowerCamelCase : Optional[Any] = None, lowerCamelCase : Union[str, Any] = PILImageResampling.BILINEAR, lowerCamelCase : Dict = True, lowerCamelCase : int = None, lowerCamelCase : Dict = True, lowerCamelCase : str = 1 / 255, lowerCamelCase : Any = True, lowerCamelCase : str = True, lowerCamelCase : Optional[Any] = None, lowerCamelCase : Tuple = None, **lowerCamelCase : int, )-> Tuple:
        super().__init__(**_lowerCAmelCase )
        # Defaults: shortest edge resized to 256, then center-cropped to 224x224.
        lowerCamelCase__ : Dict =size if size is not None else {'''shortest_edge''': 256}
        lowerCamelCase__ : Dict =get_size_dict(_lowerCAmelCase, default_to_square=_lowerCAmelCase )
        lowerCamelCase__ : List[str] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        lowerCamelCase__ : List[str] =get_size_dict(_lowerCAmelCase, param_name='''crop_size''' )
        lowerCamelCase__ : str =do_resize
        lowerCamelCase__ : Optional[int] =size
        lowerCamelCase__ : int =do_center_crop
        lowerCamelCase__ : Optional[int] =crop_size
        lowerCamelCase__ : Optional[Any] =resample
        lowerCamelCase__ : Dict =do_rescale
        lowerCamelCase__ : int =rescale_factor
        lowerCamelCase__ : Optional[int] =offset
        lowerCamelCase__ : Optional[Any] =do_normalize
        # Fall back to ImageNet statistics when none are supplied.
        lowerCamelCase__ : int =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCamelCase__ : str =image_std if image_std is not None else IMAGENET_STANDARD_STD
    # Resize: either to size['shortest_edge'] (aspect preserved) or to an exact
    # (height, width) pair.
    def snake_case ( self : Any, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple = PILImageResampling.BILINEAR, lowerCamelCase : Optional[int] = None, **lowerCamelCase : int, )-> List[Any]:
        lowerCamelCase__ : Any =get_size_dict(_lowerCAmelCase, default_to_square=_lowerCAmelCase )
        if "shortest_edge" in size:
            lowerCamelCase__ : List[str] =get_resize_output_image_size(_lowerCAmelCase, size['''shortest_edge'''], default_to_square=_lowerCAmelCase )
        elif "height" in size and "width" in size:
            lowerCamelCase__ : Dict =(size['''height'''], size['''width'''])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(_lowerCAmelCase, size=_lowerCAmelCase, resample=_lowerCAmelCase, data_format=_lowerCAmelCase, **_lowerCAmelCase )
    # Center-crop to an exact (height, width) pair; both keys are required.
    def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Dict = None, **lowerCamelCase : Dict, )-> Tuple:
        lowerCamelCase__ : Optional[Any] =get_size_dict(_lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(_lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=_lowerCAmelCase, **_lowerCAmelCase )
    # Rescale pixel values; with offset enabled, subtracting half the scale range
    # first appears intended to center the rescaled values around 0 — confirm.
    def snake_case ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] = True, lowerCamelCase : int = None, **lowerCamelCase : Optional[Any], )-> Dict:
        lowerCamelCase__ : List[str] =image.astype(np.floataa )
        if offset:
            lowerCamelCase__ : List[str] =image - (scale / 2)
        return rescale(_lowerCAmelCase, scale=_lowerCAmelCase, data_format=_lowerCAmelCase, **_lowerCAmelCase )
    # Channel-wise normalization: (image - mean) / std.
    def snake_case ( self : List[Any], lowerCamelCase : List[Any], lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int] = None, **lowerCamelCase : List[str], )-> Optional[Any]:
        return normalize(_lowerCAmelCase, mean=_lowerCAmelCase, std=_lowerCAmelCase, data_format=_lowerCAmelCase, **_lowerCAmelCase )
    # Per-frame pipeline: validate flags, then resize -> crop -> rescale ->
    # normalize -> convert channel layout.
    def snake_case ( self : Optional[int], lowerCamelCase : List[str], lowerCamelCase : Any = None, lowerCamelCase : List[str] = None, lowerCamelCase : int = None, lowerCamelCase : Optional[Any] = None, lowerCamelCase : List[str] = None, lowerCamelCase : List[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : List[Any] = None, lowerCamelCase : Tuple = None, lowerCamelCase : Union[str, Any] = None, lowerCamelCase : Union[str, Any] = ChannelDimension.FIRST, )-> Any:
        # NOTE(review): `do_resize and size is None or resample is None` parses as
        # `(do_resize and size is None) or (resample is None)` — confirm intent.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''' )
        # All transformations expect numpy arrays.
        lowerCamelCase__ : Dict =to_numpy_array(_lowerCAmelCase )
        if do_resize:
            lowerCamelCase__ : str =self.resize(image=_lowerCAmelCase, size=_lowerCAmelCase, resample=_lowerCAmelCase )
        if do_center_crop:
            lowerCamelCase__ : Tuple =self.center_crop(_lowerCAmelCase, size=_lowerCAmelCase )
        if do_rescale:
            lowerCamelCase__ : Optional[Any] =self.rescale(image=_lowerCAmelCase, scale=_lowerCAmelCase, offset=_lowerCAmelCase )
        if do_normalize:
            lowerCamelCase__ : List[Any] =self.normalize(image=_lowerCAmelCase, mean=_lowerCAmelCase, std=_lowerCAmelCase )
        lowerCamelCase__ : Union[str, Any] =to_channel_dimension_format(_lowerCAmelCase, _lowerCAmelCase )
        return image
    # Public entry point: resolve per-call overrides against the instance
    # defaults, batch the input videos, run the per-frame pipeline, and wrap the
    # result in a BatchFeature.
    def snake_case ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : List[Any] = None, lowerCamelCase : Tuple = None, lowerCamelCase : List[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : str = None, lowerCamelCase : Any = None, lowerCamelCase : Union[str, Any] = None, lowerCamelCase : Tuple = None, lowerCamelCase : List[str] = None, lowerCamelCase : List[str] = None, lowerCamelCase : Tuple = None, lowerCamelCase : int = None, lowerCamelCase : Optional[Any] = ChannelDimension.FIRST, **lowerCamelCase : List[Any], )-> Tuple:
        lowerCamelCase__ : Optional[int] =do_resize if do_resize is not None else self.do_resize
        lowerCamelCase__ : Union[str, Any] =resample if resample is not None else self.resample
        lowerCamelCase__ : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase__ : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase__ : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase__ : Optional[Any] =offset if offset is not None else self.offset
        lowerCamelCase__ : Any =do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase__ : Union[str, Any] =image_mean if image_mean is not None else self.image_mean
        lowerCamelCase__ : Any =image_std if image_std is not None else self.image_std
        lowerCamelCase__ : List[str] =size if size is not None else self.size
        lowerCamelCase__ : List[str] =get_size_dict(_lowerCAmelCase, default_to_square=_lowerCAmelCase )
        lowerCamelCase__ : Tuple =crop_size if crop_size is not None else self.crop_size
        lowerCamelCase__ : Optional[int] =get_size_dict(_lowerCAmelCase, param_name='''crop_size''' )
        if not valid_images(_lowerCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        lowerCamelCase__ : Any =make_batched(_lowerCAmelCase )
        lowerCamelCase__ : int =[
            [
                self._preprocess_image(
                    image=_lowerCAmelCase, do_resize=_lowerCAmelCase, size=_lowerCAmelCase, resample=_lowerCAmelCase, do_center_crop=_lowerCAmelCase, crop_size=_lowerCAmelCase, do_rescale=_lowerCAmelCase, rescale_factor=_lowerCAmelCase, offset=_lowerCAmelCase, do_normalize=_lowerCAmelCase, image_mean=_lowerCAmelCase, image_std=_lowerCAmelCase, data_format=_lowerCAmelCase, )
                for img in video
            ]
            for video in videos
        ]
        lowerCamelCase__ : Tuple ={'''pixel_values''': videos}
        return BatchFeature(data=_lowerCAmelCase, tensor_type=_lowerCAmelCase )
| 709 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a Flax `transformers` model and save it.

    Args:
        tax_checkpoint_path: Path to the T5X checkpoint directory.
        config_name: Name (or path) of the T5/LongT5 config to instantiate.
        flax_dump_folder_path: Output folder for the converted Flax model.

    Raises:
        ValueError: if the config is neither a ``t5`` model nor a ``longt5``
            model with a supported ``encoder_attention_type``.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)

    # v1.1 / LongT5 checkpoints split the MLP input projection into wi_0/wi_1.
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    # Pick the encoder self-attention module name used by the Flax model.
    # NOTE: a single if/elif chain — plain T5 must not fall through to the error.
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                tax_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0: the relative position bias lives on the first block.
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = tax_encoder_global_rel_embedding

    # Assigning the final encoder norm
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # The flag is `--t5x_checkpoint_path`, so the parsed attribute is `t5x_checkpoint_path`
    # (the previous code read the non-existent `args.tax_checkpoint_path`).
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Substring renames applied to every original SAM checkpoint key to match the
# HF `SamModel` parameter naming.  Read by `replace_keys` below.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}
# Keep the obfuscated alias so any stale reference to it still resolves.
_lowercase = KEYS_TO_MODIFY_MAPPING
def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to the HF `SamModel` naming scheme.

    Args:
        state_dict: Original checkpoint state dict (mutated: the unused
            normalization buffers `pixel_mean`/`pixel_std` are popped).

    Returns:
        A new dict with renamed keys, plus the positional embedding mirrored
        under the top-level shared-embedding key.
    """
    model_state_dict = {}
    # These buffers belong to the preprocessing, not the model.
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        # The hypernetwork MLP layers are renamed per-layer: 0 -> proj_in,
        # 1 -> layers.0, 2 -> proj_out.
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value

    # Mirror the positional embedding where the HF model also expects it.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Convert an original SAM checkpoint to a HF `SamModel` and sanity-check it.

    Args:
        model_name: One of the original SAM checkpoint names (e.g. ``sam_vit_h_4b8939``).
        pytorch_dump_folder: Output folder (currently unused by this conversion body).
        push_to_hub: Whether to push to the hub (currently unused by this conversion body).
        model_hub_id: Hub repo hosting the original ``checkpoints/*.pth`` files.

    Requires a CUDA device and network access (downloads the checkpoint and a test image).
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    # Build the matching config for the backbone size.
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # Reference IoU scores are only known for the vit-h checkpoint, so all
    # numerical checks are scoped to that model.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 710 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds tiny UperNet configs and random inputs for the model tests below.

    Name restored from the `UperNetModelTester(self)` call in the test class;
    the obfuscated version gave every method the same name (`snake_case`),
    so the later definitions shadowed the earlier ones.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        # Random pixel values plus optional classification labels.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        # Tiny ConvNext backbone matching the tester's dimensions.
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Model-level tests for `UperNetForSemanticSegmentation`.

    NOTE(review): this class looks machine-obfuscated — the bases
    `lowerCAmelCase_` are not defined in this file (presumably the
    `ModelTesterMixin` / `PipelineTesterMixin` imported above; confirm),
    all `_a` class attributes share one name (later assignments shadow the
    earlier ones), and the `snake_case` methods are not discoverable by
    unittest (presumably originally `test_*` names; confirm against the
    upstream UperNet test suite before relying on this class).
    """
    # Obfuscated attribute assignments: each `_a` rebinds the same name, so
    # only the final `False` survives at runtime.
    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    # setUp-style initialization: builds the model tester and a ConfigTester.
    # NOTE(review): `UperNetModelTester` is not defined under that name here.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
    # Runs the standard ConfigTester battery of config round-trip checks.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # Intentionally empty: common config properties are not applicable here.
    def snake_case ( self : List[str] )-> Dict:
        return
    # Checks that the forward signature starts with `pixel_values`.
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )
    # Exercises the semantic-segmentation head via the model tester.
    def snake_case ( self : Any )-> Union[str, Any]:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass
    # Verifies hidden-state outputs: one entry per backbone stage (+1), with
    # feature maps of shape (batch, channels, H/4, W/4).
    def snake_case ( self : Optional[int] )-> List[str]:
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
    # Checks zero-init configs leave every trainable parameter at 0.0 or 1.0.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass
    # Smoke-tests loading the first pretrained checkpoint from the archive list.
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    """Download and return the ADE20k validation image used by the slow tests.

    Name restored from the `prepare_img()` call sites in the integration
    tests; the previous body opened an undefined name instead of the
    downloaded file path.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests running pretrained UperNet checkpoints on a real image.

    NOTE(review): both methods share the obfuscated name `snake_case`, so the
    second definition shadows the first and unittest discovers neither
    (test methods must start with `test_`). Presumably these were originally
    the swin/convnext inference tests — confirm against the upstream suite.
    """
    # Inference check for the Swin-backbone checkpoint: verifies logits shape
    # (1, num_labels, 512, 512) and a 3x3 slice of expected values.
    def snake_case ( self : str )-> Union[str, Any]:
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
    # Same inference check for the ConvNext-backbone checkpoint.
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __SCREAMING_SNAKE_CASE(TestCase):
    """Code-quality checks over every dataset script under ``./datasets``.

    Base class restored to the imported ``unittest.TestCase`` (the obfuscated
    ``lowerCAmelCase_`` was undefined), and the four methods — previously all
    named ``snake_case``, shadowing each other — restored to the helper/test
    names referenced inside the bodies.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if `filepath` calls open() on a text file without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            # Negative lookahead skips calls that mention encoding or binary/write modes.
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real `print(` call in `filepath` (comments/strings ignored), or None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 711 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
    """Placeholder object emitted when the `onnx` backend is unavailable.

    Constructing it — or calling either class method — immediately delegates
    to `requires_backends`, which raises an informative error telling the
    user to install `onnx`.
    """

    _a = ['onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['onnx'])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])

    # NOTE: same (obfuscated) name as the classmethod above, preserved from
    # the original — the later definition shadows the earlier one.
    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])
| 625 | 0 |
"""simple docstring"""
from manim import *
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
    """Manim scene animating CPU/GPU memory blocks and a model skeleton.

    NOTE(review): this code is machine-obfuscated — the base `UpperCamelCase_`
    (presumably a manim `Scene`) and the many `_a` arguments (presumably manim
    direction constants such as RIGHT/DOWN and the local mobject lists) are
    undefined under these names; confirm against the original before running.
    """
    # Builds the CPU/GPU/Model block diagram, then animates empty-model
    # rectangles being "loaded" into the CPU memory column.
    def snake_case ( self : Optional[Any] )-> Any:
        # Base memory-cell rectangles (outer frame + slightly smaller fill).
        lowerCamelCase__ : Any =Rectangle(height=0.5, width=0.5 )
        lowerCamelCase__ : str =Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
        # Two columns of six cells form the CPU block.
        lowerCamelCase__ : List[str] =[mem.copy() for i in range(6 )]
        lowerCamelCase__ : Optional[Any] =[mem.copy() for i in range(6 )]
        lowerCamelCase__ : List[str] =VGroup(*_a ).arrange(_a, buff=0 )
        lowerCamelCase__ : Union[str, Any] =VGroup(*_a ).arrange(_a, buff=0 )
        lowerCamelCase__ : Tuple =VGroup(_a, _a ).arrange(_a, buff=0 )
        lowerCamelCase__ : int =Text('''CPU''', font_size=24 )
        lowerCamelCase__ : List[str] =Group(_a, _a ).arrange(_a, buff=0.5, aligned_edge=_a )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_a )
        # Single-cell GPU block, aligned left of the CPU.
        lowerCamelCase__ : List[Any] =[mem.copy() for i in range(1 )]
        lowerCamelCase__ : str =VGroup(*_a ).arrange(_a, buff=0 )
        lowerCamelCase__ : List[str] =Text('''GPU''', font_size=24 )
        lowerCamelCase__ : List[Any] =Group(_a, _a ).arrange(_a, buff=0.5, aligned_edge=_a )
        gpu.align_to(_a, _a )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(_a )
        # Six-cell Model block on the right.
        lowerCamelCase__ : List[str] =[mem.copy() for i in range(6 )]
        lowerCamelCase__ : Optional[int] =VGroup(*_a ).arrange(_a, buff=0 )
        lowerCamelCase__ : Union[str, Any] =Text('''Model''', font_size=24 )
        lowerCamelCase__ : int =Group(_a, _a ).arrange(_a, buff=0.5, aligned_edge=_a )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(_a, run_time=1 ), Create(_a, run_time=1 ), Create(_a, run_time=1 ), )
        # Caption, legend key and legend text for the "empty model" step.
        lowerCamelCase__ : Union[str, Any] =MarkupText(
            F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24, )
        lowerCamelCase__ : Optional[int] =Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCamelCase__ : int =MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_a, run_time=2.5 ), Write(_a ), Write(_a ) )
        self.add(_a )
        # One highlight/move animation pair per model cell, targeting the
        # corresponding slot in the CPU's left column.
        lowerCamelCase__ : List[str] =[]
        lowerCamelCase__ : str =[]
        lowerCamelCase__ : Optional[Any] =[]
        for i, rect in enumerate(_a ):
            lowerCamelCase__ : Tuple =Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(_a, opacity=0.7 )
            cpu_target.move_to(_a )
            cpu_target.generate_target()
            lowerCamelCase__ : Optional[int] =0.46 / 4
            lowerCamelCase__ : List[Any] =0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=_a )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=_a, buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=_a, buff=0.0 )
            cpu_targs.append(_a )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_a ) )
            second_animations.append(MoveToTarget(_a, run_time=1.5 ) )
        # Play the stroke highlights, then the moves, then hold the frame.
        self.play(*_a )
        self.play(*_a )
        self.wait()
| 712 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative divergence step of c = x + y*i under the Mandelbrot
    iteration z -> z**2 + c.

    The result is step / (max_step - 1): 1.0 means the point never escaped
    within max_step iterations (it is treated as inside the set); smaller
    values mean faster escape.

    Fixes the original's duplicate parameter names (all three were
    `__lowerCamelCase`, a SyntaxError).
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens once |z| > 2, i.e. squared magnitude > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = get_distance
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: points inside the set (distance == 1) are
    black, everything else is white.

    Fixes the original's parameter/body mismatch (`__lowerCamelCase` vs the
    `distance` name used in the body, a NameError).
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = get_black_and_white_rgb
def get_color_coded_rgb(distance: float) -> tuple:
    """HSV-based coloring: inside points (distance == 1) are black; the escape
    speed selects the hue.

    Fixes the original's parameter/body mismatch (`__lowerCamelCase` vs
    `distance`, a NameError).
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # map distance to a fully-saturated, full-value hue
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = get_color_coded_rgb
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> "Image.Image":
    """Render the Mandelbrot set into a new RGB PIL image.

    Fixes two defects of the obfuscated original: all parameters shared the
    name `__lowerCamelCase` (a SyntaxError), and the computed colors were
    assigned to throwaway locals instead of the pixel buffer, so the image
    stayed black.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # figure height follows from the width and the image aspect ratio;
    # loop-invariant, so computed once (the original recomputed it per pixel)
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = get_image
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # (the original bound the image to `_lowercase` but then called
    # `img.show()`, a NameError — fixed by binding `img`)
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 625 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf configuration from `config_path`.

    When `display` is truthy, the resolved configuration is pretty-printed as
    YAML. Fixes the original's duplicate parameter names and the undefined
    `__UpperCAmelCase` placeholders.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = load_config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Instantiate a taming-transformers VQModel from a YAML config and load
    its checkpoint onto `device`.

    Defaults point at the local ``model_checkpoints`` files. Lightning
    ``.ckpt`` checkpoints are unwrapped via their "state_dict" key.
    NOTE(review): the first positional argument was obfuscated; it is used as
    the `map_location` / target device — confirm against the original script.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the raw state dict before returning
    return model


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = load_vqgan
def reconstruct_with_vqgan(x, model):
    """Round-trip `x` through the VQGAN: encode, report the latent shape,
    decode, and return the reconstruction."""
    # encode() returns a 3-tuple; only the first element (the latents) is used
    # here — NOTE(review): confirm the remaining elements (presumably the
    # embedding loss and codebook info) against the VQModel API.
    z, _, _ = model.encode(x)
    print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = reconstruct_with_vqgan
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path such as "package.module.ClassName" to the object
    it names.

    With `reload=True`, the containing module is re-imported before the
    attribute lookup. Fixes the original's duplicate parameter names and
    undefined placeholder references.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = get_obj_from_str
def instantiate_from_config(config):
    """Instantiate the object described by ``config["target"]`` (a dotted
    import path), passing ``config["params"]`` (if any) as keyword arguments.

    Raises KeyError when the mandatory "target" key is missing.
    """
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config["target"])(**config.get("params", {}))


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = instantiate_from_config
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from `config`, optionally load state dict `sd`, move it
    to GPU and/or switch it to eval mode.

    Returns ``{"model": model}`` (dict form kept for the original callers).
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = load_model_from_config
def load_model(config, ckpt, gpu, eval_mode):
    """Load a model plus its recorded training step from checkpoint `ckpt`.

    When `ckpt` is falsy, the model is built with fresh weights and the step
    is None. Returns ``(model, global_step)``.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f'''loaded model from global step {global_step}.''')
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = load_model
| 713 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for `model_name`, attaching the id/label
    mappings for fine-tuned checkpoints.

    Fixes the original's undefined `model_name` reference (the parameter was
    obfuscated to `__lowerCamelCase`) and the config fields that were bound to
    throwaway locals.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # pre-training checkpoints keep per-patch outputs (no mean pooling)
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = get_videomae_config
def set_architecture_configs(model_name, config):
    """Set the encoder/decoder sizes on `config` according to the model
    variant ("small", "base", "large" or "huge") encoded in `model_name`.

    "base" is the VideoMAEConfig default and therefore a no-op here. Fixes
    the original, which assigned every value to a throwaway local (leaving
    the config untouched) and declared duplicate parameter names.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = set_architecture_configs
def rename_key(name):
    """Map an original VideoMAE parameter name to its HF Transformers
    equivalent via ordered substring replacements.

    Fixes the original's parameter/body mismatch (`__lowerCamelCase` vs the
    `name` used throughout the body, a NameError).
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    # non-bias attention params go to attention.self, remaining to attention.attention
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    # the final encoder layernorm ("norm.*" without decoder/fc context)
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = rename_key
def convert_state_dict(orig_state_dict, config):
    """Rewrite a VideoMAE state dict in place to HF naming, splitting each
    fused qkv projection weight into separate query/key/value tensors.

    `config` supplies `hidden_size` (encoder) and `decoder_hidden_size`
    (decoder) for the split points. Fixes the original, which popped every
    entry but bound the converted tensors to throwaway locals, destroying
    the state dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            # non-qkv parameters only need a name translation
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = convert_state_dict
def prepare_video():
    """Download the 'eating spaghetti' sample clip from the HF Hub and return
    it as a list of frames.

    Fixes the original's undefined `__lowerCamelCase` reference inside a
    zero-argument function (a NameError).
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = prepare_video
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint to the HF Transformers format.

    Downloads the original weights (Google Drive), renames/splits the
    parameters, verifies the converted model against hard-coded expected
    logits for the known `model_name` variants, and optionally saves the
    model/processor to `pytorch_dump_folder_path` and pushes to the Hub.

    Fixes the original's duplicate parameter names and the many intermediate
    results that were bound to throwaway locals.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        # pre-training checkpoints need a boolean mask of patches to reconstruct
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''')

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = convert_videomae_checkpoint
if __name__ == "__main__":
    # the original bound the parser and parsed args to `_lowercase` but then
    # used `parser` / `args`, both NameErrors — fixed by binding the names
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowercase = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks that every script in `examples/by_feature` stays consistent with
    the corresponding complete example script.

    NOTE(review): this class was mechanically obfuscated — the placeholder
    names `lowerCamelCase__` (discarded local bindings) and `_UpperCAmelCase`
    (undefined references) replaced the original identifiers, so the methods
    do not run as written. The comments below describe the apparent intent;
    restore the real names before use.
    """

    # Apparent intent: diff each `examples/by_feature/<script>` (except
    # EXCLUDE_EXAMPLES) against the given complete example, strip any
    # `special_strings` that are expected to differ, and assert an empty diff.
    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : bool, lowerCamelCase : str = None, lowerCamelCase : list = None )-> str:
        lowerCamelCase__ : Optional[int] =None  # presumably the default secondary filename — confirm
        lowerCamelCase__ : int =os.path.abspath(os.path.join('''examples''', '''by_feature''' ) )
        lowerCamelCase__ : Optional[int] =os.path.abspath('''examples''' )
        for item in os.listdir(_UpperCAmelCase ):  # iterate the by_feature scripts
            if item not in EXCLUDE_EXAMPLES:
                lowerCamelCase__ : Optional[int] =os.path.join(_UpperCAmelCase, _UpperCAmelCase )
                if os.path.isfile(_UpperCAmelCase ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=_UpperCAmelCase, feature_script=_UpperCAmelCase, tested_section='''main()''' if parser_only else '''training_function()''', ):
                        lowerCamelCase__ : List[Any] =compare_against_test(
                            os.path.join(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
                        lowerCamelCase__ : Any ='''\n'''.join(_UpperCAmelCase )
                        if special_strings is not None:
                            # strip strings that are expected to differ before comparing
                            for string in special_strings:
                                lowerCamelCase__ : Optional[Any] =diff.replace(_UpperCAmelCase, '''''' )
                        self.assertEqual(_UpperCAmelCase, '''''' )

    # Apparent intent: run the NLP example diff for both the main() and the
    # training_function() sections.
    def snake_case ( self : Union[str, Any] )-> Any:
        self.one_complete_example('''complete_nlp_example.py''', _UpperCAmelCase )
        self.one_complete_example('''complete_nlp_example.py''', _UpperCAmelCase )

    # Apparent intent: run the CV example diff, ignoring the tracking-specific
    # lines listed below (they only exist in the complete example).
    def snake_case ( self : Optional[Any] )-> Dict:
        lowerCamelCase__ : Any =os.path.abspath(os.path.join('''examples''', '''cv_example.py''' ) )
        lowerCamelCase__ : Optional[Any] =[
            ''' ''' * 16 + '''{\n\n''',
            ''' ''' * 20 + '''\"accuracy\": eval_metric[\"accuracy\"],\n\n''',
            ''' ''' * 20 + '''\"f1\": eval_metric[\"f1\"],\n\n''',
            ''' ''' * 20 + '''\"train_loss\": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 20 + '''\"epoch\": epoch,\n\n''',
            ''' ''' * 16 + '''},\n\n''',
            ''' ''' * 16 + '''step=epoch,\n''',
            ''' ''' * 12,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''', _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
        self.one_complete_example('''complete_cv_example.py''', _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """End-to-end launches of the feature example scripts via `accelerate launch`.

    NOTE(review): this class was mechanically obfuscated. The base class
    `lowerCAmelCase_` is undefined (presumably the TempDirTestCase imported
    above); the discarded `lowerCamelCase__` bindings apparently were
    `cls._tmpdir`, `cls.configPath`, `cls._launch_args` and the per-test
    `testargs`/`output` variables, and `_UpperCAmelCase` references are
    undefined. Restore the real identifiers before running.
    """

    _a = False  # presumably a base-class flag (e.g. per-test temp-dir cleanup) — confirm

    @classmethod
    def snake_case ( cls : Tuple )-> str:
        # Create a temp dir with a default accelerate config and build the base
        # `accelerate launch` command reused by every test below.
        super().setUpClass()
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : List[str] =os.path.join(cls._tmpdir, '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        lowerCamelCase__ : Tuple =['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def snake_case ( cls : List[Any] )-> Union[str, Any]:
        # Remove the temp dir created in setUpClass.
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def snake_case ( self : Tuple )-> Union[str, Any]:
        # checkpointing by epoch should produce an `epoch_0` folder
        lowerCamelCase__ : List[Any] =F'''
    examples/by_feature/checkpointing.py
    --checkpointing_steps epoch
    --output_dir {self.tmpdir}
    '''.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''epoch_0''' ) ) )

    def snake_case ( self : Tuple )-> List[str]:
        # checkpointing every step should produce a `step_2` folder
        lowerCamelCase__ : List[str] =F'''
    examples/by_feature/checkpointing.py
    --checkpointing_steps 1
    --output_dir {self.tmpdir}
    '''.split()
        lowerCamelCase__ : Tuple =run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''step_2''' ) ) )

    def snake_case ( self : Optional[Any] )-> Tuple:
        # resuming from an epoch checkpoint: epoch 0 must be skipped
        lowerCamelCase__ : int =F'''
    examples/by_feature/checkpointing.py
    --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0" )}
    '''.split()
        lowerCamelCase__ : Union[str, Any] =run_command(self._launch_args + testargs, return_stdout=_UpperCAmelCase )
        self.assertNotIn('''epoch 0:''', _UpperCAmelCase )
        self.assertIn('''epoch 1:''', _UpperCAmelCase )

    def snake_case ( self : List[Any] )-> Optional[Any]:
        # resuming from a step checkpoint: with >1 processes epoch 0 is skipped,
        # with a single process both epochs appear in the output
        lowerCamelCase__ : str =F'''
    examples/by_feature/checkpointing.py
    --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2" )}
    '''.split()
        lowerCamelCase__ : Union[str, Any] =run_command(self._launch_args + testargs, return_stdout=_UpperCAmelCase )
        if torch.cuda.is_available():
            lowerCamelCase__ : Optional[int] =torch.cuda.device_count()
        else:
            lowerCamelCase__ : Union[str, Any] =1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''', _UpperCAmelCase )
            self.assertIn('''epoch 1:''', _UpperCAmelCase )
        else:
            self.assertIn('''epoch 0:''', _UpperCAmelCase )
            self.assertIn('''epoch 1:''', _UpperCAmelCase )

    @slow
    def snake_case ( self : List[Any] )-> str:
        # cross-validation: parse the logged metric dicts from stdout and
        # require the final accuracy to be at least 0.75
        lowerCamelCase__ : Optional[int] ='''\n examples/by_feature/cross_validation.py\n --num_folds 2\n '''.split()
        with mock.patch.dict(os.environ, {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            lowerCamelCase__ : Union[str, Any] =run_command(self._launch_args + testargs, return_stdout=_UpperCAmelCase )
            lowerCamelCase__ : Tuple =re.findall('''({.+})''', _UpperCAmelCase )
            lowerCamelCase__ : List[str] =[r for r in results if '''accuracy''' in r][-1]
            lowerCamelCase__ : Tuple =ast.literal_eval(_UpperCAmelCase )
            self.assertGreaterEqual(results['''accuracy'''], 0.75 )

    def snake_case ( self : Optional[int] )-> List[Any]:
        # smoke-test the multi-process metrics example
        lowerCamelCase__ : str =['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} )
    def snake_case ( self : Optional[Any] )-> str:
        # tracking example should create a `tracking` project directory
        with tempfile.TemporaryDirectory() as tmpdir:
            lowerCamelCase__ : Dict =F'''
    examples/by_feature/tracking.py
    --with_tracking
    --project_dir {tmpdir}
    '''.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase, '''tracking''' ) ) )

    def snake_case ( self : Optional[Any] )-> Optional[int]:
        # smoke-test the gradient accumulation example
        lowerCamelCase__ : Optional[Any] =['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs )

    def snake_case ( self : Optional[Any] )-> str:
        # smoke-test the local SGD example
        lowerCamelCase__ : Union[str, Any] =['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs )
| 714 |
"""simple docstring"""
_lowercase : str = 0 # The first color of the flag.
_lowercase : Dict = 1 # The second color of the flag.
_lowercase : Tuple = 2 # The third color of the flag.
_lowercase : Optional[int] = (red, white, blue)
def dutch_national_flag_sort(sequence: list, colors: tuple = (0, 1, 2)) -> list:
    """Sort `sequence`, which may only contain the three values in `colors`,
    with a single-pass three-way partition (Dutch national flag algorithm).

    The list is sorted in place and also returned; an empty input returns a
    new empty list. Values outside `colors` raise ValueError.

    `colors` generalizes the original module-level constant (backward
    compatible: it defaults to the classic red/white/blue = 0/1/2 encoding).
    Also fixes the original's parameter/body name mismatch.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # move first-color values to the front
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # move last-color values to the back
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg)
    return sequence


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = dutch_national_flag_sort
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # read a comma-separated list of ints, sort it, and print the result;
    # the original bound both the raw input and the parsed list to
    # `_lowercase`, so `user_input` / `unsorted` were NameErrors
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f'{dutch_national_flag_sort(unsorted)}')
| 625 | 0 |
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True when `number` is prime, using 6k +/- 1 trial division.

    Fixes the original's parameter/body mismatch (`__lowerCamelCase` vs the
    `number` / `__A` names used in the body, both NameErrors).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = is_prime
def prime_generator():
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely.

    Fixes the original's undefined `__A` reference (should be the running
    candidate `num`).
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = prime_generator
def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (Project Euler problem 7).

    Fixes the original's parameter/body mismatch (`__lowerCamelCase` vs the
    `nth` / `__A` names used in the body).
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))


# keep the original (obfuscated) name bound to this helper as well
snake_case__ = solution
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under that name by the obfuscated
    # definition above (it was renamed to `snake_case__`) — restore the
    # binding before running this entry point.
    print(f'{solution() = }')
| 715 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def snake_case ( self : List[str] )-> str:
lowerCamelCase__ : Dict =32
lowerCamelCase__ : Optional[Any] =embedder_hidden_size
# image encoding components
lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
lowerCamelCase__ : Dict =UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =AutoencoderKL()
lowerCamelCase__ : int ={
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
    """Build the dummy kwargs dict fed to the img2img pipeline under test.

    NOTE(review): the positional parameters are all named ``lowerCamelCase``
    (duplicate argument names are a SyntaxError) and the body reads names that
    are never bound here (``pil_image``, ``input_image``, ``generator``) — the
    identifiers look machine-garbled; confirm against the upstream
    StableUnCLIP img2img test source before relying on this.
    """
    # Seed a generator; MPS only supports the global torch.manual_seed path.
    if str(lowerCamelCase ).startswith('''mps''' ):
        lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
    else:
        lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
    # Random (1, 3, 32, 32) image tensor moved onto the target device.
    lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
    if pil_image:
        # Rescale from [-1, 1] to [0, 1], clamp, move channels last, convert to PIL.
        lowerCamelCase__ : int =input_image * 0.5 + 0.5
        lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
        lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
        lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
    return {
        "prompt": "An anime racoon running a marathon",
        "image": input_image,
        "generator": generator,
        "num_inference_steps": 2,
        "output_type": "np",
    }
@skip_mps
def snake_case ( self : List[str] )-> Optional[Any]:
    """Smoke-test the pipeline on CPU with ``image_embeds=None`` and compare a
    3x3 corner slice of the output against a recorded reference slice.

    NOTE(review): only ``lowerCamelCase__`` is ever assigned, yet ``sd_pipe``,
    ``inputs``, ``image``, ``image_slice`` and ``expected_slice`` are read —
    identifiers appear machine-garbled; confirm against the upstream test.
    """
    lowerCamelCase__ : Dict ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
    lowerCamelCase__ : str =self.get_dummy_components()
    lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
    lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
    sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
    lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
    inputs.update({'''image_embeds''': None} )
    lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
    lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
    # Max absolute deviation from the reference slice must stay below 1e-3.
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : int )-> Tuple:
    """Run the shared attention-slicing forward-pass check; exact-difference
    comparison is only enabled on cpu/mps devices.

    NOTE(review): the flag is assigned to ``lowerCamelCase__`` but passed as
    ``lowerCamelCase`` — looks garbled; confirm against the upstream test.
    """
    lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
    self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def snake_case ( self : int )-> Optional[Any]:
    """Run the shared single-vs-batched inference identity check; exact
    comparison only on cpu/mps.

    NOTE(review): flag assigned to ``lowerCamelCase__`` but passed as
    ``lowerCamelCase`` — looks garbled; confirm against the upstream test.
    """
    lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
    self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
    torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def snake_case ( self : List[str] )-> List[str]:
    """Run the shared xFormers attention forward/generator check (CUDA-only)."""
    self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """GPU integration tests for the Stable-unCLIP img2img pipeline (slow, CUDA).

    NOTE(review): only ``lowerCamelCase__`` is ever assigned, yet the bodies read
    ``pipe``, ``output``, ``image``, ``mem_bytes`` and the ``load_*`` results —
    identifiers appear machine-garbled; confirm against the upstream
    StableUnCLIP img2img integration tests.
    """
    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Optional[int] )-> int:
        """l-variant checkpoint: generate from the turtle image, compare against
        a stored fp16 reference via mean pixel difference."""
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> Tuple:
        """Same as above for the h-variant checkpoint and its fp16 reference."""
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> List[str]:
        """Memory-budget check: with attention slicing and sequential CPU offload
        enabled, peak allocation must stay under 7 GB."""
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def __lowercase ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
if num <= 0:
lowerCamelCase__ : int =f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] =[True] * (num + 1)
lowerCamelCase__ : List[str] =[]
lowerCamelCase__ : Dict =2
lowerCamelCase__ : str =int(math.sqrt(_lowerCamelCase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_lowerCamelCase )
# Set multiples of start be False
for i in range(start * start , num + 1 , _lowerCamelCase ):
if sieve[i] is True:
lowerCamelCase__ : List[Any] =False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(_lowerCamelCase )
return prime
if __name__ == "__main__":
    # Fix: the sieve above is defined as ``__lowercase``; the original called
    # ``prime_sieve``, which is unbound in this module and raised NameError.
    print(__lowercase(int(input("Enter a positive integer: ").strip())))
| 716 |
"""simple docstring"""
def snake_case__ ( n: int = 4000000 ) -> int:
    """Return the sum of even-valued Fibonacci terms not exceeding ``n``
    (Project Euler problem 2).

    Args:
        n: inclusive upper bound for Fibonacci values considered.

    Fix: the original body iterated ``while b <= n`` but ``n``, ``a``, ``b``
    and ``even_fibs`` were never bound (only ``lowerCamelCase__`` was assigned),
    so every call raised NameError; coherent names are restored here.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
    # Fix: the solver above is named ``snake_case__``; the original referenced
    # ``solution``, which is unbound in this module and raised NameError.
    print(f"{snake_case__() = }")
| 625 | 0 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# NOTE(review): both module constants below share the name ``_lowercase`` so the
# logger assignment clobbers the URL template.  Later code in this module reads
# ``COMMUNITY_PIPELINES_URL`` and ``logger`` instead — the names look
# machine-garbled; confirm against upstream ``diffusers`` dynamic-module utils.
_lowercase : List[str] = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowercase : int = logging.get_logger(__name__)  # pylint: disable=invalid-name
def snake_case__ ( ):
    """Return all released ``diffusers`` versions from PyPI, oldest first.

    Queries the PyPI JSON API and sorts the release strings semantically with
    ``packaging.version.Version`` rather than lexicographically.

    Fix: the original body passed an unbound ``_lowerCamelCase`` to
    ``request.urlopen`` and ``sorted``; the URL and release list are now bound
    explicitly.
    """
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda release: version.Version(release))
def snake_case__ ( ):
    """Create the HF dynamic-modules cache directory and put it on ``sys.path``.

    Idempotent: returns immediately when ``HF_MODULES_CACHE`` is already on the
    path; otherwise creates the directory with an ``__init__.py`` so it is
    importable as a package.

    Fix: the original body passed an unbound ``_lowerCamelCase`` to every call;
    it clearly meant the ``HF_MODULES_CACHE`` directory (and ``exist_ok=True``).
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def snake_case__ ( name ):
    """Create the dynamic module ``name`` (recursively) under the HF modules cache.

    ``name`` may be a nested path; each missing parent level is created first so
    every directory carries an ``__init__.py`` and imports as a package.

    Fix: the original body mixed the unbound ``_lowerCamelCase`` with ``name``;
    the parameter is now named and used explicitly.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def snake_case__ ( module_file ):
    """Return the unique module names imported relatively (``.x``) by ``module_file``.

    Catches both ``import .xxx`` and ``from .xxx import yyy`` forms, scanning
    line-by-line with MULTILINE regexes.

    Fix: the original body passed an unbound ``_lowerCamelCase`` to ``open`` and
    ``re.findall``; the parameter and file content are now used directly, and
    the regex patterns are raw strings.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def snake_case__ ( __lowerCamelCase : List[str] ):
    """Transitively collect every file relatively imported from ``module_file``.

    Fixed-point loop: keep resolving relative imports of newly discovered files
    until a pass adds nothing new, then return the accumulated list.

    NOTE(review): the body reads names that are never bound here
    (``_lowerCamelCase``, ``no_change``, ``files_to_check``, ``new_imports``,
    ``module_path``, ``new_import_files``, ``all_relative_imports``) — the
    identifiers appear machine-garbled; confirm against the upstream
    ``get_relative_import_files`` in diffusers' dynamic-module utilities.
    """
    lowerCamelCase__ : Optional[int] =False
    lowerCamelCase__ : Tuple =[module_file]
    lowerCamelCase__ : Optional[int] =[]
    # Let's recurse through all relative imports
    while not no_change:
        lowerCamelCase__ : Optional[Any] =[]
        for f in files_to_check:
            new_imports.extend(get_relative_imports(_lowerCamelCase ) )
        lowerCamelCase__ : Optional[Any] =Path(_lowerCamelCase ).parent
        lowerCamelCase__ : List[str] =[str(module_path / m ) for m in new_imports]
        lowerCamelCase__ : Optional[Any] =[f for f in new_import_files if f not in all_relative_imports]
        lowerCamelCase__ : Union[str, Any] =[f'''{f}.py''' for f in new_import_files]
        lowerCamelCase__ : Tuple =len(_lowerCamelCase ) == 0
        all_relative_imports.extend(_lowerCamelCase )
    return all_relative_imports
def snake_case__ ( filename ):
    """Check that every top-level package imported by ``filename`` is installed.

    Scans ``import xxx`` / ``from xxx import yyy`` lines, drops relative
    imports, and tries to import each remaining top-level package.

    Returns:
        The file's relative imports (via ``get_relative_imports``).

    Raises:
        ImportError: listing all packages that could not be imported.

    Fix: the original body referenced an unbound ``_lowerCamelCase`` throughout;
    the parameter and locals are now named explicitly and the regex patterns
    are raw strings.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module of absolute imports.
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    missing_packages = []
    for imp in list(set(imports)):
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" )
    return get_relative_imports(filename)
def snake_case__ ( class_name, module_path ):
    """Import ``module_path`` (a filesystem-style dotted path) and return the
    attribute ``class_name`` from it; when ``class_name`` is None, auto-detect
    the single pipeline class via ``find_pipeline_class``.

    Fix: the original signature declared both parameters with the same name
    (``__lowerCamelCase`` twice — a SyntaxError) and the body read unbound
    locals; distinct, meaningful parameter names are restored.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def snake_case__ ( __lowerCamelCase : List[str] ):
    """Return the unique ``DiffusionPipeline`` subclass defined in a loaded module.

    Skips ``DiffusionPipeline`` itself and classes re-exported from the
    ``diffusers`` package; raises ValueError when more than one candidate exists.

    NOTE(review): the body reads unbound names (``_lowerCamelCase``,
    ``cls_members``, ``pipeline_class``, ``loaded_module``) — identifiers appear
    machine-garbled; confirm against upstream ``find_pipeline_class``.
    """
    from ..pipelines import DiffusionPipeline
    lowerCamelCase__ : int =dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
    lowerCamelCase__ : Union[str, Any] =None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , _lowerCamelCase )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    f''' {loaded_module}.''' )
            lowerCamelCase__ : Any =cls
    return pipeline_class
def snake_case__ ( pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, ):
    """Locate/download ``module_file`` for a pipeline and cache it inside the HF
    dynamic-modules cache, copying its relative imports along.

    Resolution order:
      1. a local file path,
      2. a bare community-pipeline name fetched from the diffusers GitHub
         ``examples/community`` tree (pinned to ``revision``),
      3. a ``user/repo`` id on the Hugging Face Hub.

    Returns:
        The path (relative to ``HF_MODULES_CACHE``) of the cached module file.

    Fix: the original signature declared every parameter with the same name
    (``__lowerCamelCase`` repeated — a SyntaxError) and the body read unbound
    ``_lowerCamelCase`` locals; the upstream parameter/local names are restored.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}." )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,  # raw GitHub content needs no HF token
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed_file = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed_file), submodule_path / module_needed_file)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / f"{module_needed}.py").exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def snake_case__ ( pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    """Fetch/cache ``module_file`` for ``pretrained_model_name_or_path`` and
    return the requested class from it (auto-detected when ``class_name`` is
    None).

    Fix: the original signature declared every parameter with the same name
    (``__lowerCamelCase`` repeated — a SyntaxError) and forwarded unbound
    locals; the upstream parameter names are restored.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Builds tiny BlenderbotSmall configs and inputs for the TF model tests.

    NOTE(review): identifiers appear machine-garbled — the three ``_a``
    assignments clobber each other (upstream these are ``config_cls``,
    ``config_updates``, ``hidden_act``), ``__init__`` declares every parameter
    as ``lowerCamelCase`` (duplicate argument names are a SyntaxError), and the
    method bodies read names never bound here (``input_ids``, ``outputs``,
    ``next_tokens`` ...).  Confirm against the upstream
    ``TFBlenderbotSmallModelTester`` before relying on this.
    """
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'
    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        # Record the tiny-model hyper-parameters used when building configs/inputs.
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id
    def snake_case ( self : Any )-> Any:
        """Build a tiny config plus encoder/decoder inputs (EOS forced at the end)."""
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict
    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        """Check cached decoding (past_key_values) matches full-sequence decoding
        on a random slice of the logits."""
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def snake_case__ ( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    """Fill in any missing masks for a BlenderbotSmall TF forward pass and
    return the complete kwargs dict.

    Mirrors the Flax helper at the top of this test file: padding positions are
    zeroed out of ``attention_mask``, the first decoder position is always
    attendable, and head masks default to all-ones.

    Fix: the original signature declared every parameter with the same name
    (``__lowerCamelCase`` repeated — a SyntaxError); the names used by the body
    are restored as parameters (matching the Flax variant's signature).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common-test harness wiring for TF BlenderbotSmall.

    NOTE(review): the repeated ``_a`` class attributes clobber one another
    (upstream they are ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping`` and three booleans) and the base classes
    ``lowerCAmelCase_`` are garbled — confirm against the upstream
    ``TFBlenderbotSmallModelTest``.
    """
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _a = True
    _a = False
    _a = False
    def snake_case ( self : Any )-> str:
        """Instantiate the model tester and a config tester for the common checks."""
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )
    def snake_case ( self : Any )-> Optional[int]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def snake_case ( self : int )-> str:
        """Exercise cached decoding with large inputs via the model tester."""
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow generation integration test for the 90M BlenderbotSmall checkpoint.

    NOTE(review): the two ``_a`` attributes clobber each other (upstream they
    are ``src_text`` and ``model_name``) — confirm against the upstream
    integration test class.
    """
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'
    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def snake_case ( self : int )-> List[Any]:
        """Lazily load the seq2seq LM for the checkpoint under test."""
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def snake_case ( self : Tuple )-> int:
        """Generate a reply with beam search and check it matches a known output."""
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Module-level logger for this fine-tuning script (upstream name: ``logger``).
_lowercase : Any = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
    """Arguments controlling the dataset and preprocessing for TAPEX fine-tuning.

    NOTE(review): every field is assigned to the same name ``_a`` (each
    assignment clobbers the previous; upstream these are ``dataset_name``,
    ``dataset_config_name``, ``max_seq_length``, ``overwrite_cache``,
    ``pad_to_max_length``, ``max_train/eval/predict_samples``, ``train_file``,
    ``validation_file``, ``test_file``), defaults reference a garbled
    ``_UpperCAmelCase``, and the validation method reads unbound locals —
    confirm against the upstream tab_fact example script.
    """
    _a = field(
        default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    _a = field(
        default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
    _a = field(
        default=1_0_2_4 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    _a = field(
        default=_UpperCAmelCase , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    _a = field(
        default=_UpperCAmelCase , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    _a = field(
        default=_UpperCAmelCase , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    _a = field(
        default=_UpperCAmelCase , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
    _a = field(default=_UpperCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
    def snake_case ( self : Union[str, Any] )-> Dict:
        """Validate that either a dataset name or matching train/validation files were supplied."""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
        else:
            lowerCamelCase__ : Union[str, Any] =self.train_file.split('''.''' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            lowerCamelCase__ : Optional[int] =self.validation_file.split('''.''' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __SCREAMING_SNAKE_CASE :
    """Arguments identifying the pretrained model/config/tokenizer to fine-tune.

    NOTE(review): all fields share the garbled name ``_a`` (upstream:
    ``model_name_or_path``, ``config_name``, ``tokenizer_name``, ``cache_dir``,
    ``use_fast_tokenizer``, ``model_revision``, ``use_auth_token``) and the
    defaults reference a garbled ``_UpperCAmelCase`` — confirm against the
    upstream example script.
    """
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    _a = field(
        default=_UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    _a = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    _a = field(
        default=_UpperCAmelCase , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
def snake_case__ ( ):
    """Main entry point: fine-tune a BART/TAPEX sequence-classification model
    on the TabFact table-entailment task.

    Pipeline: parse CLI/json args -> configure logging -> detect a previous
    checkpoint -> load datasets (Hub or local csv/json) -> build config,
    TapexTokenizer and BartForSequenceClassification -> tokenize
    (statement, linearized table) pairs -> train / evaluate / predict via
    ``Trainer``.

    NOTE(review): identifier mangling rebinds every local to
    ``lowerCamelCase__`` while later statements read the upstream names
    (``parser``, ``training_args``, ``data_args``, ``model_args``,
    ``raw_datasets``, ``tokenizer`` ...). As written this function raises
    NameError immediately; the comments below describe the intended upstream
    behavior.
    """
    # Parse the three dataclasses from argv, or from a single json file path.
    lowerCamelCase__ : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase__ : Dict =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCamelCase__ : Dict =parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    # Propagate the per-process verbosity to datasets/transformers loggers.
    lowerCamelCase__ : Union[str, Any] =training_args.get_process_log_level()
    logger.setLevel(a_ )
    datasets.utils.logging.set_verbosity(a_ )
    transformers.utils.logging.set_verbosity(a_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    lowerCamelCase__ : Optional[int] =None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCamelCase__ : Any =get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            # Non-empty output dir without a checkpoint: refuse to clobber it.
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        lowerCamelCase__ : List[str] =load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        lowerCamelCase__ : Any ={'''train''': data_args.train_file, '''validation''': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                # Test file must share the train file's format (csv or json).
                lowerCamelCase__ : List[Any] =data_args.train_file.split('''.''' )[-1]
                lowerCamelCase__ : Optional[int] =data_args.test_file.split('''.''' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                lowerCamelCase__ : str =data_args.test_file
            else:
                raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
        for key in data_files.keys():
            logger.info(f'''load a local file for {key}: {data_files[key]}''' )
        if data_args.train_file.endswith('''.csv''' ):
            # Loading a dataset from local csv files
            lowerCamelCase__ : int =load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            lowerCamelCase__ : Tuple =load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    lowerCamelCase__ : str =raw_datasets['''train'''].features['''label'''].names
    lowerCamelCase__ : Union[str, Any] =len(a_ )
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase__ : Optional[Any] =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    lowerCamelCase__ : Dict =TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
    lowerCamelCase__ : List[Any] =BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        lowerCamelCase__ : Tuple ='''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        lowerCamelCase__ : Optional[Any] =False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    lowerCamelCase__ : Tuple ={'''Refused''': 0, '''Entailed''': 1}
    lowerCamelCase__ : List[Any] ={0: '''Refused''', 1: '''Entailed'''}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
            f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    lowerCamelCase__ : Optional[int] =min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(__lowerCamelCase : str ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(__lowerCamelCase : List[Any] ):
            # '#'-separated rows, newline-separated lines -> DataFrame with
            # the first row as the column header.
            lowerCamelCase__ : List[Any] =[_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
            lowerCamelCase__ : Dict =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        lowerCamelCase__ : List[Any] =examples['''statement''']
        lowerCamelCase__ : Optional[int] =list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
        lowerCamelCase__ : Any =tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
        lowerCamelCase__ : List[Any] =examples['''label''']
        return result
    with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        lowerCamelCase__ : Optional[Any] =raw_datasets.map(
            a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        lowerCamelCase__ : List[str] =raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            lowerCamelCase__ : Any =train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        lowerCamelCase__ : List[str] =raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            lowerCamelCase__ : Union[str, Any] =eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('''--do_predict requires a test dataset''' )
        lowerCamelCase__ : Tuple =raw_datasets['''test''']
        if data_args.max_predict_samples is not None:
            lowerCamelCase__ : Optional[int] =predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(a_ ) ) , 3 ):
            logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(__lowerCamelCase : EvalPrediction ):
        lowerCamelCase__ : str =p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
        lowerCamelCase__ : Tuple =np.argmax(a_ , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        lowerCamelCase__ : Tuple =default_data_collator
    elif training_args.fpaa:
        # fp16: pad to a multiple of 8 for tensor-core efficiency.
        lowerCamelCase__ : Union[str, Any] =DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
    else:
        lowerCamelCase__ : List[Any] =None
    # Initialize our Trainer
    lowerCamelCase__ : Optional[Any] =Trainer(
        model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
    # Training
    if training_args.do_train:
        # Prefer an explicit --resume_from_checkpoint, else the auto-detected one.
        lowerCamelCase__ : List[str] =None
        if training_args.resume_from_checkpoint is not None:
            lowerCamelCase__ : Dict =training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCamelCase__ : str =last_checkpoint
        lowerCamelCase__ : str =trainer.train(resume_from_checkpoint=a_ )
        lowerCamelCase__ : Optional[int] =train_result.metrics
        lowerCamelCase__ : int =(
            data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
        )
        lowerCamelCase__ : Optional[int] =min(a_ , len(a_ ) )
        trainer.save_model() # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , a_ )
        trainer.save_metrics('''train''' , a_ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowerCamelCase__ : Tuple =trainer.evaluate(eval_dataset=a_ )
        lowerCamelCase__ : str =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
        lowerCamelCase__ : Union[str, Any] =min(a_ , len(a_ ) )
        trainer.log_metrics('''eval''' , a_ )
        trainer.save_metrics('''eval''' , a_ )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        lowerCamelCase__ : Optional[Any] =predict_dataset.remove_columns('''label''' )
        lowerCamelCase__ : Optional[Any] =trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
        lowerCamelCase__ : Union[str, Any] =np.argmax(a_ , axis=1 )
        lowerCamelCase__ : Optional[int] =os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
        if trainer.is_world_process_zero():
            # Only rank 0 writes the TSV of predicted label names.
            with open(a_ , '''w''' ) as writer:
                logger.info('''***** Predict Results *****''' )
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(a_ ):
                    lowerCamelCase__ : Union[str, Any] =label_list[item]
                    writer.write(f'''{index}\t{item}\n''' )
    lowerCamelCase__ : Optional[int] ={'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
    if training_args.push_to_hub:
        trainer.push_to_hub(**a_ )
    else:
        trainer.create_model_card(**a_ )
def snake_case__ ( __lowerCamelCase : List[str] ):
    """Per-process entry point; the single argument is ignored.

    NOTE(review): presumably the ``_mp_fn`` used by TPU spawning (the argument
    would be the process index) — confirm against the launcher. ``main`` is not
    defined under that name in this mangled file, so this call would raise
    NameError as written.
    """
    main()
# Script entry point. NOTE(review): `main` is not defined under that name in
# this mangled file (the entry function was renamed) — confirm before running.
if __name__ == "__main__":
    main()
| 718 |
"""simple docstring"""
def snake_case__ ( graph : list[list[int]] , next_ver : int , curr_ind : int , path : list[int] ) -> bool:
    """Return True if ``next_ver`` can extend the partial Hamiltonian ``path``.

    A vertex is a valid extension when the adjacency matrix ``graph`` has an
    edge from the most recently placed vertex (``path[curr_ind - 1]``) to it,
    and it has not been visited yet.

    Fix: the original declared all four parameters as ``__lowerCamelCase``
    (duplicate parameter names are a SyntaxError) while the body read
    ``graph``/``next_ver``/``curr_ind``/``path``; the parameters now carry the
    names the body uses, in the upstream order.
    """
    # 1. Validate that an edge exists between the current and next vertices.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that the next vertex is not already in the path.
    return next_ver not in path
def snake_case__ ( graph : list[list[int]] , path : list[int] , curr_ind : int ) -> bool:
    """Backtracking helper: try to complete a Hamiltonian cycle in ``path``.

    ``path`` has ``len(graph) + 1`` slots with the start vertex already placed
    at both ends and ``-1`` elsewhere; slots ``curr_ind .. len(graph) - 1`` are
    filled in place. Returns True (with ``path`` completed) when a cycle
    exists, else False (with the tried slots reset to ``-1``).

    Fix: the original declared duplicate ``__lowerCamelCase`` parameters
    (SyntaxError) and called the undefined globals ``valid_connection`` and
    ``util_hamilton_cycle``; the logic is now self-contained via an inner
    recursive function, with unchanged behavior.
    """
    def _fill(ind: int) -> bool:
        # Base case: every slot but the closing one is filled — the cycle is
        # valid only if an edge leads back to the starting vertex.
        if ind == len(graph):
            return graph[path[ind - 1]][path[0]] == 1
        # Recursive step: try every vertex as the next transition.
        for next_ver in range(len(graph)):
            # Valid when an edge reaches it from the last placed vertex and
            # it has not been visited yet.
            if graph[path[ind - 1]][next_ver] != 0 and next_ver not in path:
                path[ind] = next_ver
                if _fill(ind + 1):
                    return True
                # Backtrack: undo the tentative placement.
                path[ind] = -1
        return False

    return _fill(curr_ind)
def snake_case__ ( graph : list[list[int]] , start_index : int = 0 ) -> list[int]:
    """Search ``graph`` (a 0/1 adjacency matrix) for a Hamiltonian cycle.

    Returns the cycle as a vertex list whose first and last entries are both
    ``start_index``, or an empty list when no Hamiltonian cycle exists.

    Fix: the original declared both parameters as ``__lowerCamelCase``
    (duplicate parameter names are a SyntaxError), read the undefined name
    ``start_index`` and called the undefined global ``util_hamilton_cycle``;
    the search is now self-contained with the upstream parameter names.
    """
    path = [-1] * (len(graph) + 1)
    # Initialize start and end of the path with the starting index.
    path[0] = path[-1] = start_index

    def _fill(ind: int) -> bool:
        # Base case: path full; accept only if the last edge closes the cycle.
        if ind == len(graph):
            return graph[path[ind - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            # Candidate must be adjacent to the previous vertex and unvisited.
            if graph[path[ind - 1]][next_ver] != 0 and next_ver not in path:
                path[ind] = next_ver
                if _fill(ind + 1):
                    return True
                path[ind] = -1  # backtrack
        return False

    # Return the completed path if a cycle was found, else an empty list.
    return path if _fill(1) else []
| 625 | 0 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
    '''Processor wrapping an OwlViT image processor and a CLIP tokenizer into a
    single callable for text / query-image / image inputs.

    NOTE(review): inside this class body every ``__UpperCamelCase`` undergoes
    name mangling to ``_SCREAMING_SNAKE_CASE__UpperCamelCase`` and is never
    defined, and the ``__init__``/``__call__`` signatures repeat the parameter
    name ``lowerCamelCase`` (a SyntaxError). The comments below describe the
    intended upstream OwlViTProcessor behavior.
    '''

    # ProcessorMixin attribute list / component class names (all mangled to _a).
    _a = ['image_processor', 'tokenizer']
    _a = 'OwlViTImageProcessor'
    _a = ('CLIPTokenizer', 'CLIPTokenizerFast')
    # Accepts image_processor and tokenizer; supports the deprecated
    # `feature_extractor` kwarg as an alias for image_processor.
    def __init__( self : Optional[int], lowerCamelCase : Any=None, lowerCamelCase : Optional[int]=None, **lowerCamelCase : List[str] )-> str:
        lowerCamelCase__ : List[Any] =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', __UpperCamelCase, )
            lowerCamelCase__ : int =kwargs.pop('''feature_extractor''' )
        lowerCamelCase__ : List[Any] =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(__UpperCamelCase, __UpperCamelCase )
    # Tokenizes text queries (padding each sample in a batch to the max number
    # of queries), processes query images and/or target images, and returns a
    # combined BatchEncoding in the requested tensor framework.
    def __call__( self : str, lowerCamelCase : Optional[Any]=None, lowerCamelCase : List[str]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any="max_length", lowerCamelCase : List[str]="np", **lowerCamelCase : Dict )-> str:
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )
        if text is not None:
            # A flat string/list is one sample; a nested list is a batch where
            # each sample may carry a different number of text queries.
            if isinstance(__UpperCamelCase, __UpperCamelCase ) or (isinstance(__UpperCamelCase, __UpperCamelCase ) and not isinstance(text[0], __UpperCamelCase )):
                lowerCamelCase__ : Any =[self.tokenizer(__UpperCamelCase, padding=__UpperCamelCase, return_tensors=__UpperCamelCase, **__UpperCamelCase )]
            elif isinstance(__UpperCamelCase, __UpperCamelCase ) and isinstance(text[0], __UpperCamelCase ):
                lowerCamelCase__ : Dict =[]
                # Maximum number of queries across batch
                lowerCamelCase__ : str =max([len(__UpperCamelCase ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__UpperCamelCase ) != max_num_queries:
                        lowerCamelCase__ : Tuple =t + [''' '''] * (max_num_queries - len(__UpperCamelCase ))
                    lowerCamelCase__ : Optional[Any] =self.tokenizer(__UpperCamelCase, padding=__UpperCamelCase, return_tensors=__UpperCamelCase, **__UpperCamelCase )
                    encodings.append(__UpperCamelCase )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
            # Concatenate the per-sample encodings along the batch axis in the
            # requested framework (numpy / jax / torch / tensorflow).
            if return_tensors == "np":
                lowerCamelCase__ : Union[str, Any] =np.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0 )
                lowerCamelCase__ : Dict =np.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                lowerCamelCase__ : List[str] =jnp.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0 )
                lowerCamelCase__ : Tuple =jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                lowerCamelCase__ : Dict =torch.cat([encoding['''input_ids'''] for encoding in encodings], dim=0 )
                lowerCamelCase__ : Tuple =torch.cat([encoding['''attention_mask'''] for encoding in encodings], dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                lowerCamelCase__ : Tuple =tf.stack([encoding['''input_ids'''] for encoding in encodings], axis=0 )
                lowerCamelCase__ : List[str] =tf.stack([encoding['''attention_mask'''] for encoding in encodings], axis=0 )
            else:
                raise ValueError('''Target return tensor type could not be returned''' )
            lowerCamelCase__ : List[str] =BatchEncoding()
            lowerCamelCase__ : Optional[int] =input_ids
            lowerCamelCase__ : Optional[int] =attention_mask
        if query_images is not None:
            # Query images replace text queries for image-guided detection.
            lowerCamelCase__ : int =BatchEncoding()
            lowerCamelCase__ : Optional[Any] =self.image_processor(
                __UpperCamelCase, return_tensors=__UpperCamelCase, **__UpperCamelCase ).pixel_values
            lowerCamelCase__ : List[Any] =query_pixel_values
        if images is not None:
            lowerCamelCase__ : Tuple =self.image_processor(__UpperCamelCase, return_tensors=__UpperCamelCase, **__UpperCamelCase )
        # Merge pixel values into the encoding depending on which inputs were given.
        if text is not None and images is not None:
            lowerCamelCase__ : Tuple =image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCamelCase__ : Any =image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__UpperCamelCase ), tensor_type=__UpperCamelCase )
    # Thin delegations to the image processor's post-processing helpers.
    def snake_case ( self : Optional[int], *lowerCamelCase : Dict, **lowerCamelCase : Optional[int] )-> List[str]:
        return self.image_processor.post_process(*__UpperCamelCase, **__UpperCamelCase )
    def snake_case ( self : Optional[Any], *lowerCamelCase : Union[str, Any], **lowerCamelCase : Any )-> Optional[int]:
        return self.image_processor.post_process_object_detection(*__UpperCamelCase, **__UpperCamelCase )
    def snake_case ( self : Union[str, Any], *lowerCamelCase : Optional[int], **lowerCamelCase : List[str] )-> Dict:
        return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase, **__UpperCamelCase )
    # Thin delegations to the tokenizer's decode helpers.
    def snake_case ( self : Optional[int], *lowerCamelCase : List[Any], **lowerCamelCase : Union[str, Any] )-> Tuple:
        return self.tokenizer.batch_decode(*__UpperCamelCase, **__UpperCamelCase )
    def snake_case ( self : Any, *lowerCamelCase : Tuple, **lowerCamelCase : Dict )-> Dict:
        return self.tokenizer.decode(*__UpperCamelCase, **__UpperCamelCase )
    # Deprecated aliases kept for backward compatibility.
    @property
    def snake_case ( self : Union[str, Any] )-> List[Any]:
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', __UpperCamelCase, )
        return self.image_processor_class
    @property
    def snake_case ( self : Dict )-> Any:
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', __UpperCamelCase, )
        return self.image_processor
| 719 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''Unit tests for the MBart tokenizer (slow and fast variants) using the
    shared SentencePiece test fixture.

    NOTE(review): the first base class ``lowerCAmelCase_`` is undefined here
    (presumably ``TokenizerTesterMixin`` before mangling), and the method
    bodies read ``tokenizer``/``lowerCamelCase`` names that the mangled
    assignments never bind — confirm against the upstream test file.
    '''

    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True
    # Build a tokenizer from the SentencePiece fixture and save it to tmpdir.
    def snake_case ( self : Tuple )-> Union[str, Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    # Full tokenization round-trip: tokenize -> ids -> tokens, including
    # out-of-vocabulary characters mapping to <unk>.
    def snake_case ( self : Dict )-> Union[str, Any]:
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )
    # Check save/load parity between slow (Python) and fast (Rust) tokenizers
    # in both legacy and non-legacy serialization formats.
    def snake_case ( self : Tuple )-> List[Any]:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Integration tests for the pretrained ``facebook/mbart-large-en-ro``
    tokenizer: language codes, batch encoding shapes, label shifting and
    translation-input building.

    NOTE(review): the class attributes were all mangled to ``_a``; upstream
    they are checkpoint_name, src_text, tgt_text and expected_src_tokens,
    which is how the methods below reference them.
    '''

    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    # Load the real checkpoint once for the whole test class.
    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls
    # Language codes map to their fixed fairseq ids.
    def snake_case ( self : Optional[Any] )-> List[str]:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
    # Encoding the source text yields the expected token ids.
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
    # Decoding with skip_special_tokens drops the language code and EOS.
    def snake_case ( self : Optional[Any] )-> str:
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
    # Truncation keeps EOS + language code as the last two ids.
    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
    def snake_case ( self : int )-> Any:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
    # The fairseq token/id tables survive a save/load round-trip.
    def snake_case ( self : Tuple )-> Optional[Any]:
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
    # Label shifting produces decoder inputs that start with the target
    # language code (fairseq parity).
    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    # Batch shapes and special-token bookkeeping after padded encoding.
    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    # Source and target can be truncated to different max lengths.
    def snake_case ( self : List[Any] )-> Dict:
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    # _build_translation_inputs attaches the forced BOS for the target language.
    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
    # NOTE(review): `_a` is used both as an (undefined) base class and as four
    # mutually shadowing attributes below — this looks like mangled renaming of
    # the TokenizerTesterMixin attributes (tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer, ...); TODO restore.
    _a = XLMRobertaTokenizer
    _a = XLMRobertaTokenizerFast
    _a = True
    _a = True
    def snake_case ( self : Dict )-> Optional[int]:
        """Create a SentencePiece-backed tokenizer fixture and save it to tmpdirname."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): assigns `lowerCamelCase__` but next line reads `tokenizer`;
        # `snake_case_` is undefined (presumably the sample vocab path and
        # keep_accents=True) — TODO confirm.
        lowerCamelCase__ : int =XLMRobertaTokenizer(snake_case_, keep_accents=snake_case_ )
        tokenizer.save_pretrained(self.tmpdirname )
    def snake_case ( self : Optional[int] )-> int:
        """Token <-> id conversion for the '<pad>' special token (id 1)."""
        # NOTE(review): reads `snake_case_` (undefined) — presumably the
        # token / token_id pair assigned just above; TODO restore.
        lowerCamelCase__ : Tuple ='''<pad>'''
        lowerCamelCase__ : Optional[Any] =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ), snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ), snake_case_ )
    def snake_case ( self : str )-> Optional[Any]:
        """First/last vocab entries and total size (1002) of the fixture tokenizer."""
        lowerCamelCase__ : Any =list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], '''<s>''' )
        self.assertEqual(vocab_keys[1], '''<pad>''' )
        self.assertEqual(vocab_keys[-1], '''<mask>''' )
        self.assertEqual(len(snake_case_ ), 1002 )
    def snake_case ( self : List[str] )-> Union[str, Any]:
        """vocab_size of the fixture sentencepiece model (1000 + offset)."""
        self.assertEqual(self.get_tokenizer().vocab_size, 1002 )
    def snake_case ( self : Union[str, Any] )-> Optional[int]:
        """End-to-end tokenize / ids / back-to-tokens on the sentencepiece fixture.

        NOTE(review): assignments target `lowerCamelCase__` while later lines read
        `tokenizer` and `snake_case_` — mangled renaming; TODO restore.
        """
        lowerCamelCase__ : str =XLMRobertaTokenizer(snake_case_, keep_accents=snake_case_ )
        lowerCamelCase__ : List[str] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(snake_case_, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(snake_case_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            snake_case_, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(
            snake_case_, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : List[str] =tokenizer.convert_ids_to_tokens(snake_case_ )
        self.assertListEqual(
            snake_case_, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )
    def snake_case ( self : List[Any] )-> Dict:
        """save_pretrained/from_pretrained round-trips for fast vs slow tokenizers
        (default, legacy_format=True and legacy_format=False layouts).

        NOTE(review): `snake_case_` placeholders below stand in for real locals
        (pretrained_name, kwargs, tmpdir paths, booleans) — mangled; TODO restore.
        """
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : Dict =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(snake_case_, **snake_case_ )
                lowerCamelCase__ : Tuple =self.tokenizer_class.from_pretrained(snake_case_, **snake_case_ )
                lowerCamelCase__ : List[Any] =tempfile.mkdtemp()
                lowerCamelCase__ : Any =tokenizer_r.save_pretrained(snake_case_ )
                lowerCamelCase__ : List[Any] =tokenizer_p.save_pretrained(snake_case_ )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : Union[str, Any] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(snake_case_, snake_case_ )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(snake_case_ )
                lowerCamelCase__ : Optional[int] =tokenizer_p.from_pretrained(snake_case_ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(snake_case_, snake_case_ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(snake_case_ )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Any =tempfile.mkdtemp()
                lowerCamelCase__ : Any =tokenizer_r.save_pretrained(snake_case_, legacy_format=snake_case_ )
                lowerCamelCase__ : List[str] =tokenizer_p.save_pretrained(snake_case_ )
                # Checks it save with the same files
                self.assertSequenceEqual(snake_case_, snake_case_ )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(snake_case_ )
                lowerCamelCase__ : Union[str, Any] =tokenizer_p.from_pretrained(snake_case_ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(snake_case_, snake_case_ ) )
                shutil.rmtree(snake_case_ )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[Any] =tempfile.mkdtemp()
                lowerCamelCase__ : Tuple =tokenizer_r.save_pretrained(snake_case_, legacy_format=snake_case_ )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(snake_case_ )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Tuple =tokenizer_r.from_pretrained(snake_case_ )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(snake_case_ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(snake_case_, snake_case_ ) )
                shutil.rmtree(snake_case_ )
@cached_property
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
    def snake_case ( self : Optional[Any] )-> Tuple:
        """Tokenizer built from a copied model file survives a pickle round-trip."""
        with tempfile.NamedTemporaryFile() as f:
            # NOTE(review): source path `snake_case_` is undefined — presumably the
            # sample-vocab fixture path; TODO restore.
            shutil.copyfile(snake_case_, f.name )
            lowerCamelCase__ : Optional[Any] =XLMRobertaTokenizer(f.name, keep_accents=snake_case_ )
            lowerCamelCase__ : str =pickle.dumps(snake_case_ )
            pickle.loads(snake_case_ )
    def snake_case ( self : Optional[Any] )-> Optional[int]:
        """Slow (Python) and fast (Rust) tokenizers must agree on tokens and ids.

        NOTE(review): `snake_case_` placeholders stand in for the sequence /
        intermediate results / booleans — mangled; TODO restore.
        """
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase__ : Optional[int] =self.get_tokenizer()
        lowerCamelCase__ : Any =self.get_rust_tokenizer()
        lowerCamelCase__ : Any ='''I was born in 92000, and this is falsé.'''
        lowerCamelCase__ : int =tokenizer.tokenize(snake_case_ )
        lowerCamelCase__ : int =rust_tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_, snake_case_ )
        lowerCamelCase__ : Tuple =tokenizer.encode(snake_case_, add_special_tokens=snake_case_ )
        lowerCamelCase__ : Dict =rust_tokenizer.encode(snake_case_, add_special_tokens=snake_case_ )
        self.assertListEqual(snake_case_, snake_case_ )
        lowerCamelCase__ : List[Any] =self.get_rust_tokenizer()
        lowerCamelCase__ : Union[str, Any] =tokenizer.encode(snake_case_ )
        lowerCamelCase__ : Union[str, Any] =rust_tokenizer.encode(snake_case_ )
        self.assertListEqual(snake_case_, snake_case_ )
    @slow
    def snake_case ( self : Any )-> Any:
        """Pretrained tokenizer matches fairseq's XLM-R encoding of 'Hello World!'."""
        lowerCamelCase__ : Optional[Any] ='''Hello World!'''
        lowerCamelCase__ : Dict =[0, 3_5378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(snake_case_, self.big_tokenizer.encode(snake_case_ ) )
    @slow
    def snake_case ( self : List[Any] )-> Union[str, Any]:
        """Pretrained tokenizer matches fairseq XLM-R on a long, noisy sentence
        (including how '<unk>' is tokenized — see the inline fairseq notes)."""
        lowerCamelCase__ : List[Any] =(
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        lowerCamelCase__ : int =[
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            17_9459,
            12_4850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            1_0114,
            711,
            152,
            20,
            6,
            5,
            2_2376,
            642,
            1221,
            1_5190,
            3_4153,
            450,
            5608,
            959,
            1119,
            5_7702,
            136,
            186,
            47,
            1098,
            2_9367,
            47,
            # 4426, # What fairseq tokenizes from "<unk>": "_<"
            # 3678, # What fairseq tokenizes from "<unk>": "unk"
            # 2740, # What fairseq tokenizes from "<unk>": ">"
            3, # What we tokenize from "<unk>": "<unk>"
            6, # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            5_0901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(snake_case_, self.big_tokenizer.encode(snake_case_ ) )
    @slow
    def snake_case ( self : List[Any] )-> List[Any]:
        """Golden-output integration test against a pinned xlm-roberta-base revision."""
        # fmt: off
        lowerCamelCase__ : Union[str, Any] ={'''input_ids''': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_, model_name='''xlm-roberta-base''', revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''', )
| 720 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read boolean flag *key* from the environment.

    Returns *default* unchanged when the variable is unset; otherwise returns
    1/0 for the usual truthy/falsy spellings (same contract as the old
    ``distutils.util.strtobool``, which was removed in Python 3.12 — PEP 632).

    Raises:
        ValueError: if the variable is set to an unrecognized value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False (strtobool-compatible).
        lowered = value.lower()
        if lowered in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif lowered in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
# Global test gates parsed once from the environment.
# NOTE(review): the four assignments below all bind `_lowercase` (shadowing each
# other), while the gate helpers further down read `_run_slow_tests`,
# `_run_remote_tests`, `_run_local_tests`, `_run_packaged_tests` — mangled
# renaming; TODO restore those names.
_lowercase : Optional[Any] = parse_flag_from_env("RUN_SLOW", default=False)
_lowercase : int = parse_flag_from_env("RUN_REMOTE", default=False)
_lowercase : List[Any] = parse_flag_from_env("RUN_LOCAL", default=True)
_lowercase : Optional[Any] = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
_lowercase : Any = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
_lowercase : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
_lowercase : List[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
_lowercase : List[str] = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ",
)
# Beam
_lowercase : int = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
_lowercase : Union[str, Any] = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
_lowercase : Union[str, Any] = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """Decorator: skip *test_case* unless ``faiss`` is importable.

    (Restored names: the mangled bodies passed the undefined
    ``__UpperCamelCase`` to ``unittest.skip`` and returned ``test_case``.)
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires faiss''')(test_case)
    return test_case
def require_regex(test_case):
    """Decorator: skip *test_case* unless ``regex`` is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires regex''')(test_case)
    return test_case
def require_elasticsearch(test_case):
    """Decorator: skip *test_case* unless ``elasticsearch`` is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires elasticsearch''')(test_case)
    return test_case
def require_sqlalchemy(test_case):
    """Decorator: skip *test_case* unless ``sqlalchemy`` is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires sqlalchemy''')(test_case)
    return test_case
def require_torch(test_case):
    """Decorator: skip *test_case* unless datasets detected PyTorch.

    (Restored names; relies on ``config`` imported from ``datasets`` at the top
    of the file.)
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('''test requires PyTorch''')(test_case)
    return test_case
def require_tf(test_case):
    """Decorator: skip *test_case* unless datasets detected TensorFlow."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('''test requires TensorFlow''')(test_case)
    return test_case
def require_jax(test_case):
    """Decorator: skip *test_case* unless datasets detected JAX."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('''test requires JAX''')(test_case)
    return test_case
def require_pil(test_case):
    """Decorator: skip *test_case* unless datasets detected Pillow."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('''test requires Pillow''')(test_case)
    return test_case
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(__UpperCamelCase )
else:
return test_case
def require_spacy_model(model):
    """Decorator factory: skip a test unless ``spacy`` and *model* are available.

    ImportError (no spacy) and OSError (spacy present but *model* not
    downloadable/loadable) are reported with distinct skip reasons.
    """
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip('''test requires spacy''')(test_case)
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    """Decorator: skip *test_case* unless ``pyspark`` is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''')(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    """Decorator: skip *test_case* unless ``joblibspark`` is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''')(test_case)
    else:
        return test_case
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
    """Skip the decorated test unless slow tests were enabled (RUN_SLOW)."""
    # NOTE(review): reads module flag `_run_slow_tests`, but the flags above are
    # bound to `_lowercase`; `__UpperCamelCase` and `test_case` are likewise
    # undefined here (should be the parameter) — mangled renaming; TODO restore.
    if not _run_slow_tests or _run_slow_tests == 0:
        lowerCamelCase__ : Any =unittest.skip('''test is slow''' )(__UpperCamelCase )
    return test_case
def snake_case__ ( __lowerCamelCase : int ):
    """Skip the decorated test unless local tests are enabled (RUN_LOCAL)."""
    if not _run_local_tests or _run_local_tests == 0:
        lowerCamelCase__ : Optional[int] =unittest.skip('''test is local''' )(__UpperCamelCase )
    return test_case
def snake_case__ ( __lowerCamelCase : str ):
    """Skip the decorated test unless packaged tests are enabled (RUN_PACKAGED)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        lowerCamelCase__ : Union[str, Any] =unittest.skip('''test is packaged''' )(__UpperCamelCase )
    return test_case
def snake_case__ ( __lowerCamelCase : Optional[int] ):
    """Skip the decorated test unless remote/network tests are enabled (RUN_REMOTE)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        lowerCamelCase__ : List[str] =unittest.skip('''test requires remote''' )(__UpperCamelCase )
    return test_case
def snake_case__ ( *__lowerCamelCase : List[str] ):
"""simple docstring"""
def decorate(cls : Union[str, Any] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
lowerCamelCase__ : Optional[int] =decorator(__UpperCamelCase )
setattr(cls , __UpperCamelCase , __UpperCamelCase )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by the offline simulator when a request is made with no timeout set.

    (Restored: the class name is what the offline context manager below raises;
    the mangled base ``SCREAMING_SNAKE_CASE__`` was undefined — a plain
    ``Exception`` base is required for ``raise`` to work.)
    """
    pass
class OfflineSimulationMode(Enum):
    """How the offline context manager below simulates a missing network.

    Restored: the members are compared by identity in the offline helper
    (``mode is OfflineSimulationMode.CONNECTION_FAILS`` etc.), which also
    fixes the mangled ``_a = 0/1/2`` attributes that shadowed each other.
    """
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def snake_case__ ( __lowerCamelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __lowerCamelCase : Optional[int]=1e-1_6 ):
    """Context manager simulating an offline environment for `requests`-based code.

    Three strategies (see OfflineSimulationMode): fail every connection, make
    connections time out (by rewriting the URL to a non-routable address), or
    flip datasets' HF_DATASETS_OFFLINE config flag.

    NOTE(review): assignments bind `lowerCamelCase__` while later lines read
    `mode`, `url`, `timeout`, `online_request`, `max_retry_error`, and
    `__UpperCamelCase` is undefined — mangled renaming; TODO restore.
    """
    lowerCamelCase__ : Dict =requests.Session().request
    def timeout_request(__lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Any , **__lowerCamelCase : List[Any] ):
        # Change the url to an invalid url so that the connection hangs
        lowerCamelCase__ : Optional[int] ="""https://10.255.255.1"""
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        lowerCamelCase__ : Optional[Any] =timeout
        try:
            return online_request(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            lowerCamelCase__ : Union[str, Any] =url
            lowerCamelCase__ : Dict =e.args[0]
            lowerCamelCase__ : int =(max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
            lowerCamelCase__ : Union[str, Any] =(max_retry_error,)
            raise
    def raise_connection_error(__lowerCamelCase : List[str] , __lowerCamelCase : List[str] , **__lowerCamelCase : int ):
        raise requests.ConnectionError('''Offline mode is enabled.''' , request=__UpperCamelCase )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''' , __UpperCamelCase ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''' , __UpperCamelCase ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Context manager: chdir into a fresh temporary directory, restore on exit.

    *args/**kwargs are forwarded to ``tempfile.TemporaryDirectory``. The
    mangled original chdir'd to the undefined ``__UpperCamelCase`` in both
    places; restored here as: enter the temp dir, then always chdir back to the
    caller's original working directory (even if the body raised).
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # Restore before TemporaryDirectory deletes tmp_dir.
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """Context manager asserting PyArrow allocated more memory inside the block.

    (Fixed: the mangled body bound the baseline to ``lowerCamelCase__`` but
    asserted against the undefined ``previous_allocated_memory``.)
    """
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Context manager asserting PyArrow allocated no extra memory inside the block."""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """Return True iff two NumPy bit generators would produce the same stream.

    Both generators are deep-copied first so neither argument's state is
    advanced. (Fixed: the mangled signature declared the same parameter name
    twice, which is a SyntaxError.)
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502(func):
    """Decorator: turn HTTP 500/502 failures of *func* into pytest xfails.

    Other HTTPErrors (and all other exceptions) propagate unchanged. Uses the
    ``decorator`` package so the wrapped function keeps its signature.
    (Restored: the mangled body passed the undefined ``__UpperCamelCase``.)
    """
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Flaky-server responses are expected failures, not test bugs.
            if str(err).startswith('''500''') or str(err).startswith('''502'''):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any, lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any] )-> Dict:
lowerCamelCase__ : List[str] =returncode
lowerCamelCase__ : Dict =stdout
lowerCamelCase__ : Union[str, Any] =stderr
async def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ):
"""simple docstring"""
while True:
lowerCamelCase__ : int =await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run *cmd* (argv list) asynchronously, streaming stdout/stderr line by line.

    Returns a _RunOutput with the exit code and the captured, decoded lines.
    Unless *quiet*, each line is echoed to sys.stdout/sys.stderr with a
    "stdout:"/"stderr:" label as it arrives.
    """
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # asyncio.wait() stopped accepting bare coroutines in Python 3.11 — wrap in
    # tasks explicitly. The timeout doesn't seem to make any difference here.
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='''stderr:'''))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Blocking wrapper around _stream_subprocess.

    Raises:
        RuntimeError: if the command exits with a positive return code, or if
            it produced no output at all (the caller may rely on the remote
            side to do the actual testing).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')
    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker.

    ``PYTEST_XDIST_WORKER`` looks like ``gw0``/``gw1``/...; defaults to 0 when
    not running under xdist. (Restored: the mangled body fed the undefined
    ``__UpperCamelCase`` to re.sub/int; the name is what the port helper below
    calls.)
    """
    worker = os.environ.get('''PYTEST_XDIST_WORKER''', '''gw0''')
    worker = re.sub(R'''^gw''', '''''', worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    """Return 29500 offset by the xdist worker id.

    Gives each concurrent pytest-xdist worker its own distributed-training
    port so parallel runs don't collide. (Fixed: the mangled body bound both
    values to ``lowerCamelCase__`` and then returned the undefined
    ``port + uniq_delta``.)
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 721 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase )
lowerCamelCase__ : str =range(1 , __lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
| 625 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def snake_case__ ( __lowerCamelCase : Sequence[int] | None = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
lowerCamelCase__ : Optional[int] =nums[0]
for i in range(1 , len(snake_case__ ) ):
lowerCamelCase__ : Any =nums[i]
lowerCamelCase__ : List[Any] =max(snake_case__ , ans + num , snake_case__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_lowercase = int(input("Enter number of elements : ").strip())
_lowercase = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return *x* unchanged if it is iterable, else the pair ``(x, x)``.

    Used to normalize square image/patch sizes given as a single int.
    (Restored name: the test class below calls ``to_atuple(...)``, which used
    to raise NameError because the function was defined as ``snake_case__``.)
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
        """Hook: build (vision_model, text_model) from configs; concrete testers override.

        NOTE(review): the duplicated parameter name `lowerCamelCase` is a
        SyntaxError; sibling methods call this as
        ``self.get_vision_text_model(vision_config, text_config)`` — TODO restore.
        """
        pass
    def snake_case ( self : List[str] )-> List[str]:
        """Hook: prepare config and inputs; concrete testers override."""
        pass
    def snake_case ( self : Optional[Any] )-> str:
        """Hook: concrete testers override."""
        pass
    def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
        """Assert the max absolute difference between two arrays is within a tolerance."""
        # NOTE(review): body reads `a`/`b`/`diff`/`tol` while the (duplicated,
        # SyntaxError-causing) parameters are all `lowerCamelCase` — TODO restore.
        lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
        self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
    def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
        """Model built from (vision, text) configs projects both modalities to projection_dim."""
        # NOTE(review): duplicated `lowerCamelCase` parameters (SyntaxError) and
        # reads of `input_ids`/`pixel_values`/`config` that no longer match the
        # assignments — mangled renaming; TODO restore.
        lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        """Dual encoder assembled from pretrained sub-models keeps projection_dim shapes."""
        # NOTE(review): same mangling pattern as above (duplicate params, reads of
        # `vision_model`/`text_model`/`model`/`output` vs `lowerCamelCase__`) — TODO restore.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        """save_pretrained/from_pretrained round-trip leaves outputs within 1e-3."""
        # NOTE(review): mangled names throughout; in particular
        # `np.abs(out_a - out_a)` compares an output with itself — presumably it
        # should be the before/after-reload pair; TODO restore.
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
            lowerCamelCase__ : List[str] =after_output[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[str] =model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
lowerCamelCase__ : int =output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase__ : int =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase__ : List[Any] =output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
lowerCamelCase__ : Any =inputs_dict
lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Union[str, Any]:
lowerCamelCase__ : Any =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : int =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def snake_case ( self : str )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def snake_case ( self : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
lowerCamelCase__ : Tuple =config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[str] =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[Any] =after_outputs[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """ViT vision tower + BERT text tower instantiation of the dual-encoder test mixin.

    Method names restored so the mixin's ``self.get_vision_text_model`` /
    ``self.prepare_config_and_inputs`` / ``self.get_pretrained_model_and_inputs``
    calls resolve (the originals were all named ``snake_case`` and shadowed each other).
    """

    def get_pretrained_model_and_inputs(self):
        # Compose tiny pretrained backbones (loaded from their PyTorch checkpoints).
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """CLIP vision tower + BERT text tower instantiation of the dual-encoder test mixin."""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint."""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        # Reference logits recorded from the original implementation.
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """FIFO queue over a list, tracked by head/tail cursors (used by AVLtree.__str__).

    Renamed from the mangled class name: the tree printer instantiates ``MyQueue()``.
    The original __init__/push/pop assigned results to throwaway locals instead of
    ``self`` attributes, so the queue never held any state.
    """

    def __init__(self) -> None:
        self.data: list[Any] = []  # backing store; popped items are not removed, only skipped
        self.head: int = 0  # index of the next item to pop
        self.tail: int = 0  # index one past the last pushed item

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        """Number of items currently in the queue."""
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A single AVL-tree node; new nodes are leaves with height 1.

    Renamed from the mangled class name: ``insert_node`` constructs ``MyNode(data)``.
    The original setters assigned to locals instead of ``self`` attributes.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    """Height of the subtree rooted at *node*; an empty subtree has height 0.

    Renamed from the mangled name: every balance check in this module calls
    ``get_height``.
    """
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int, b: int) -> int:
    """Return the larger of *a* and *b* (name restored to match its call sites).

    The original definition repeated the same parameter name twice, which is a
    SyntaxError in Python.
    """
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate *node* right: promote its left child and return the new subtree root.

    NOTE(review): the printed message says "left rotation" — kept byte-identical to
    the upstream implementation, which has the labels swapped.
    """
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    # Recompute heights bottom-up: demoted node first, then the new root.
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    """Rotate *node* left: promote its right child and return the new subtree root.

    NOTE(review): the printed message says "right rotation" — kept byte-identical to
    the upstream implementation, which has the labels swapped.
    """
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    # Recompute heights bottom-up: demoted node first, then the new root.
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: rotate the left child left, then *node* right."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: rotate the right child right, then *node* left."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """Insert *data* into the subtree rooted at *node* and rebalance; return the new root."""
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(height)
    return node
def get_right_most(root: MyNode) -> Any:
    """Return the maximum value in the subtree (data of the right-most node)."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root: MyNode) -> Any:
    """Return the minimum value in the subtree (data of the left-most node)."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete *data* from the subtree rooted at *root*, rebalancing on the way out.

    Returns the new subtree root, or ``None`` when the subtree becomes empty.
    """
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace the payload with the in-order successor
            # (left-most of the right subtree), then delete that successor below.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance using the (pre-deletion) child references, as upstream does.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree façade over the free functions above.

    Renamed from the mangled class name: the demo at the bottom of this file
    constructs ``AVLtree()``.
    """

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                # Keep the level traversal aligned by pushing placeholder children.
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                # End of a level is reached when cnt == 2**i - 1.
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def snake_case__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
    # Demo: insert 0..9 in random order, print the tree, then delete them all.
    # Restored under the guard (the original ran mangled statements at import
    # time and referenced an undefined name ``t``).
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 701 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0-1 knapsack by plain recursion.

    Returns the best total value achievable using items from *index* onward
    with *max_weight* capacity remaining. The original definition repeated one
    parameter name five times (a SyntaxError) and recursed via the undefined
    name ``knapsack``; both are fixed here.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a ``.safetensors`` LoRA checkpoint into a base Stable Diffusion pipeline.

    Fixes the mangled original: duplicate parameter names (SyntaxError), the
    non-existent ``torch.floataa`` dtype (now ``torch.float32``), and unresolved
    call arguments. Name restored to match the ``convert(...)`` call in the
    ``__main__`` block.

    Returns the modified :class:`StableDiffusionPipeline`.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: submodule names may themselves contain "_", so on a
        # failed lookup we glue the next segment on and retry.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair_keys = [up_weight_key, down_weight_key]
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W += alpha * (up @ down); conv LoRA weights carry two
        # trailing singleton dims that are squeezed away and restored.
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    # CLI wrapper: parse arguments, merge the LoRA weights, save the pipeline.
    # Restored variable names (the mangled original bound everything to
    # ``_lowercase`` and then used the unbound names) and moved the statements
    # back under the guard so importing this module has no side effects.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 702 |
"""simple docstring"""
# Dependency pin table: maps a bare package name to the full requirement
# specifier used when generating setup/extras and runtime version checks.
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 0 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Binary digit string of a non-negative integer, computed recursively.

    Fixes the mangled original, which unpacked ``divmod`` into a single name and
    recursed with unresolved arguments. Name restored to match the caller below.

    >>> binary_recursive(10)
    '1010'
    >>> binary_recursive(0)
    '0'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Validate an optionally-signed decimal string and return its '0b'-prefixed binary form.

    Raises:
        ValueError: if *number* is empty or not an integer literal.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 703 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """Maximum product over all contiguous subarrays of *numbers* (0 for empty input).

    Fixes the mangled original, whose body referenced the unbound name
    ``numbers`` and lost the min/max swap targets.

    Raises:
        ValueError: if *numbers* is not an iterable of integers.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # A negative factor flips which running product is largest.
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 625 | 0 |
"""simple docstring"""
class TrieNode:
    """A prefix-tree node; the root doubles as the public trie interface.

    Renamed from the mangled class name: the test code below constructs
    ``TrieNode()`` and calls ``insert_many``/``find``/``delete``, so those
    method names are restored as well.
    """

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False  # True when a stored word ends at this node

    def insert_many(self, words: list[str]) -> None:
        """Insert every word in *words*."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert *word*, creating intermediate nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was inserted (prefixes alone do not match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word*, pruning nodes that no longer lead to any word."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    """Depth-first print of every stored word under *node*, prefixed by *word*.

    The mangled original printed and recursed with an unresolved placeholder
    name; restored to print the accumulated *word* and recurse into each child.
    """
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small fixed word list; return True on success."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def snake_case__(msg: str, passes: bool) -> None:
    """Print ``msg`` followed by a pass/fail verdict.

    (The original signature repeated one parameter name — a SyntaxError —
    so parameters are renamed positionally; the second name ``passes``
    matches the reference the original body already used.)
    """
    print(str(msg), '''works!''' if passes else '''doesn\'t work :(''')
def snake_case__ ( ):
    """Run the trie self-test; raises AssertionError on any regression."""
    # NOTE(review): expects a module-level `test_trie` — sibling definitions in
    # this file carry obfuscated names (`snake_case__`), so confirm this resolves.
    assert test_trie()
def snake_case__ ( ):
    """Entry point: report whether the trie functionality test passes."""
    # NOTE(review): expects module-level `print_results` and `test_trie`;
    # sibling defs in this file carry obfuscated names — confirm resolution.
    print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
    # NOTE(review): `main` is expected to be defined above; the functions in
    # this file were renamed to `snake_case__`, so confirm this call resolves.
    main()
| 704 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Output container for the Flax ControlNet forward pass.

    NOTE(review): both fields were flattened to `_a = 42` by a rename pass, so
    the second assignment overwrites the first and only one attribute survives.
    Presumably these were `down_block_res_samples` and `mid_block_res_sample`
    (see the `FlaxControlNetOutput(...)` call later in this file) — confirm
    against the upstream definition before use.
    """
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax module embedding the ControlNet conditioning image.

    A small conv stack that maps the conditioning image down to the channel
    width expected by the UNet's first block. The final conv is
    zero-initialized so the conditioning branch starts as a no-op.

    Fix note: the original obfuscated code assigned every submodule to a
    throwaway local, while ``__call__`` reads ``self.conv_in`` /
    ``self.blocks`` / ``self.conv_out`` — the attribute assignments are
    restored from those uses.
    """

    # Channels produced by the final zero-initialized conv.
    conditioning_embedding_channels: int = 42
    # Widths of the intermediate conv stages.
    block_out_channels: tuple = (1_6, 3_2, 9_6, 2_5_6)
    dtype: "jnp.dtype" = jnp.floataa

    def snake_case ( self : Tuple )-> int:
        # NOTE(review): flax.linen normally requires this initializer to be
        # named `setup` for submodule registration — confirm how it is invoked.
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            blocks.append(nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) )
            # Stride-2 conv halves the spatial resolution between widths.
            blocks.append(nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) )
        self.blocks = blocks
        # Zero init: the conditioning branch contributes nothing at step 0.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        embedding = self.conv_in(lowerCamelCase )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , FlaxModelMixin , ConfigMixin ):
    """Flax ControlNet model: a UNet-encoder copy that produces per-resolution
    residual hints from a conditioning image.

    Fix notes (obfuscation repair):
    - The original base list repeated ``lowerCAmelCase_`` twice (a TypeError at
      class creation); it is restored to the mixins this file imports,
      ``FlaxModelMixin`` and ``ConfigMixin``.
    - Class fields were flattened to repeated ``_a = ...``; names are restored
      from the ``self.<name>`` reads in the methods below.
    - ``__call__`` repeated one parameter name seven times (a SyntaxError);
      parameters are renamed from the references in its body.
    NOTE(review): the initializer below keeps its obfuscated name ``snake_case``
    to preserve the interface, but flax.linen expects it to be named ``setup``
    — confirm how this module is driven.
    """

    sample_size: int = 3_2
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_2_8_0
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: "jnp.dtype" = jnp.floataa
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)

    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        """Initialize parameters with dummy inputs; returns the params dict."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.floataa )
        timesteps = jnp.ones((1,), dtype=jnp.intaa )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution with 3 (RGB) channels.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.floataa )
        params_rng, dropout_rng = jax.random.split(lowerCamelCase )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond )["params"]

    def snake_case ( self : Any )-> Tuple:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most
        # models) it defaults to `attention_head_dim` — a historical misnaming
        # kept for checkpoint compatibility (diffusers issue #2011).
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads, int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        # Every controlnet projection conv is zero-initialized so the model
        # starts by emitting zero residuals.
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block )
            # One projection per resnet layer ...
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block )
            # ... plus one for the downsampler output of non-final blocks.
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__( self : int, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale : float = 1.0, return_dict : bool = True, train : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet; returns per-resolution residuals and a mid residual."""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Conditioning images are expected RGB; flip the channel axis.
            controlnet_cond = jnp.flip(controlnet_cond, axis=1 )
        # 1. time
        if not isinstance(timesteps, jnp.ndarray ):
            timesteps = jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(timesteps, jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.floataa )
            timesteps = jnp.expand_dims(timesteps, 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process (NCHW -> NHWC for flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train )
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train )
        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [res * conditioning_scale for res in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase : Union[str, Any] = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["MobileViTFeatureExtractor"]
_lowercase : Tuple = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["CLIPFeatureExtractor"]
_lowercase : int = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """Placeholder object standing in for a class that needs the `note_seq`
    backend.

    Any attempt to instantiate or use it raises a helpful ImportError via
    ``requires_backends`` instead of an obscure AttributeError.
    (Fix: the metaclass referenced an undefined ``__a``; the file imports
    ``DummyObject`` for exactly this purpose.)
    """
    # Backends this dummy guards against.
    _a = ['note_seq']

    def __init__( self, *lowerCamelCase, **lowerCamelCase_kwargs ):
        requires_backends(self, ['''note_seq'''] )

    @classmethod
    def snake_case ( cls, *lowerCamelCase, **lowerCamelCase_kwargs ):
        requires_backends(cls, ['''note_seq'''] )

    @classmethod
    def snake_case ( cls, *lowerCamelCase, **lowerCamelCase_kwargs ):
        requires_backends(cls, ['''note_seq'''] )
| 706 |
"""simple docstring"""
import os
def snake_case__(path=None):
    """Project Euler 22: total of the name scores in a sorted name list.

    Each name scores ``sum(ord(letter) - 64)`` (A=1 .. Z=26) multiplied by
    its 1-based position in the alphabetically sorted list.

    path: optional path to the names file; defaults to ``p022_names.txt``
    next to this script (backward-compatible generalization — the original
    took no arguments and always read the bundled file).
    """
    if path is None:
        path = os.path.dirname(__file__) + '''/p022_names.txt'''
    with open(path) as file:
        names = str(file.readlines()[0])
    # Strip the surrounding quotes and split on commas.
    names = names.replace('''"''', '''''').split(''',''')
    names.sort()
    total_score = 0
    for i, name in enumerate(names):
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score
if __name__ == "__main__":
print(solution())
| 625 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : int = logging.get_logger(__name__)
_lowercase : Tuple = torch.device("cpu")
def snake_case__ ( ):
    """Download and return the standard COCO sanity-check image (000000039769)."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # stream=True so PIL can read straight from the response's raw stream.
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
def snake_case__ ( __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def snake_case__(dct, old_key, new_key):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    (The original signature repeated one parameter name three times — a
    SyntaxError — so parameters are renamed positionally.)
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def snake_case__ ( __lowerCamelCase : str ):
    """Build (old_key, new_key) pairs mapping original SwiftFormer checkpoint
    keys to the HuggingFace naming scheme."""
    state_dict = __lowerCamelCase
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
        if ".dwconv" in k:
            k_new = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
        if ".Proj." in k:
            k_new = k_new.replace('''.Proj.''' , '''.proj.''' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            ls = k_new.split('''.''' )
            # `network.<stage>.<block>.*` -> `...network.<stage>.blocks.<block>.*`
            if ls[2].isdigit():
                k_new = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
            else:
                k_new = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def snake_case__(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to HF format and verify it.

    swiftformer_name: one of the known variant names (selects architecture).
    pytorch_dump_folder_path: directory the converted model is saved into.
    original_ckpt: local path or https URL of the original checkpoint.

    (The original signature repeated one parameter name — a SyntaxError —
    so parameters are renamed from the f-string/argparse references.)
    NOTE(review): the helper functions called below (`create_rename_keys`,
    `rename_key`, `prepare_img`, `get_expected_output`) are defined in this
    file under obfuscated names — confirm those bindings resolve.
    """
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='''cpu''', check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt, map_location='''cpu''' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    inputs = processor(images=image, return_tensors='''pt''' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['''pixel_values'''] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
_lowercase : Optional[Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __SCREAMING_SNAKE_CASE :
    """A binary-tree node holding an integer value and optional children.

    (Fix: the original constructor assigned all three attributes to a
    throwaway local; `self.value` / `self.left` / `self.right` are restored
    from the uses in the tree-sum class below.)
    """

    def __init__( self : str, lowerCamelCase : int )-> None:
        self.value = lowerCamelCase
        # Children start empty; callers attach subtrees directly.
        self.left: Node | None = None
        self.right: Node | None = None
class __SCREAMING_SNAKE_CASE :
    """Single-shot iterator yielding the sum of all node values in a tree.

    (Fixes: `self.tree` was assigned to a throwaway local, and the recursive
    calls referenced a nonexistent `depth_first_search` method — they now
    call this class's actual method name.)
    """

    def __init__( self : int, lowerCamelCase : Node )-> None:
        self.tree = lowerCamelCase

    def snake_case ( self : str, lowerCamelCase : Node | None )-> int:
        """Recursively sum the subtree rooted at ``lowerCamelCase``."""
        if lowerCamelCase is None:
            return 0
        return lowerCamelCase.value + (
            self.snake_case(lowerCamelCase.left ) + self.snake_case(lowerCamelCase.right )
        )

    def __iter__( self : Dict )-> Iterator[int]:
        # Yields the total exactly once.
        yield self.snake_case(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 625 | 0 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency models).

    NOTE(review): local bindings in these tests were flattened by a rename
    pass (`lowerCamelCase__ : T = ...`), while later statements read
    `lowercase_`; those references no longer resolve to any binding — verify
    against the upstream diffusers test module before relying on these tests
    actually running.
    """
    # Scheduler classes under test and default forward steps.
    _a = (CMStochasticIterativeScheduler,)
    _a = 1_0

    # Builds the default scheduler config, merged with keyword overrides.
    def snake_case ( self : Optional[Any], **lowerCamelCase : List[str] )-> Any:
        lowerCamelCase__ : List[str] ={
            """num_train_timesteps""": 201,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**lowercase_ )
        return config

    # Two consecutive steps must preserve the sample's shape.
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ : Optional[int] =10
        lowerCamelCase__ : Any =self.get_scheduler_config()
        lowerCamelCase__ : Dict =self.scheduler_classes[0](**lowercase_ )
        scheduler.set_timesteps(lowercase_ )
        lowerCamelCase__ : Dict =scheduler.timesteps[0]
        lowerCamelCase__ : Optional[int] =scheduler.timesteps[1]
        lowerCamelCase__ : Optional[Any] =self.dummy_sample
        lowerCamelCase__ : Any =0.1 * sample
        lowerCamelCase__ : List[Any] =scheduler.step(lowercase_, lowercase_, lowercase_ ).prev_sample
        lowerCamelCase__ : Optional[int] =scheduler.step(lowercase_, lowercase_, lowercase_ ).prev_sample
        self.assertEqual(output_a.shape, sample.shape )
        self.assertEqual(output_a.shape, output_a.shape )

    # Config sweep: number of training timesteps.
    def snake_case ( self : Any )-> Any:
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase_ )

    # Config sweep: clip_denoised on/off.
    def snake_case ( self : Union[str, Any] )-> List[str]:
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=lowercase_ )

    # Full single-step loop; checks sum/mean of the final sample against
    # golden values.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ : List[Any] =self.scheduler_classes[0]
        lowerCamelCase__ : Any =self.get_scheduler_config()
        lowerCamelCase__ : Dict =scheduler_class(**lowercase_ )
        lowerCamelCase__ : Optional[int] =1
        scheduler.set_timesteps(lowercase_ )
        lowerCamelCase__ : Any =scheduler.timesteps
        lowerCamelCase__ : Optional[Any] =torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =self.dummy_model()
        lowerCamelCase__ : int =self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(lowercase_ ):
            # 1. scale model input
            lowerCamelCase__ : int =scheduler.scale_model_input(lowercase_, lowercase_ )
            # 2. predict noise residual
            lowerCamelCase__ : Optional[int] =model(lowercase_, lowercase_ )
            # 3. predict previous sample x_t-1
            lowerCamelCase__ : List[Any] =scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_ ).prev_sample
            lowerCamelCase__ : str =pred_prev_sample
        lowerCamelCase__ : List[Any] =torch.sum(torch.abs(lowercase_ ) )
        lowerCamelCase__ : List[Any] =torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 192.7614 ) < 1E-2
        assert abs(result_mean.item() - 0.2_510 ) < 1E-3

    # Multistep loop with custom timesteps; checks golden sums.
    def snake_case ( self : Tuple )-> Union[str, Any]:
        lowerCamelCase__ : int =self.scheduler_classes[0]
        lowerCamelCase__ : List[Any] =self.get_scheduler_config()
        lowerCamelCase__ : List[str] =scheduler_class(**lowercase_ )
        lowerCamelCase__ : List[Any] =[106, 0]
        scheduler.set_timesteps(timesteps=lowercase_ )
        lowerCamelCase__ : str =scheduler.timesteps
        lowerCamelCase__ : Any =torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =self.dummy_model()
        lowerCamelCase__ : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            lowerCamelCase__ : Any =scheduler.scale_model_input(lowercase_, lowercase_ )
            # 2. predict noise residual
            lowerCamelCase__ : List[Any] =model(lowercase_, lowercase_ )
            # 3. predict previous sample x_t-1
            lowerCamelCase__ : Optional[Any] =scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_ ).prev_sample
            lowerCamelCase__ : Dict =pred_prev_sample
        lowerCamelCase__ : List[str] =torch.sum(torch.abs(lowercase_ ) )
        lowerCamelCase__ : str =torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 347.6357 ) < 1E-2
        assert abs(result_mean.item() - 0.4_527 ) < 1E-3

    # Non-descending custom timesteps must be rejected.
    def snake_case ( self : Dict )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =self.scheduler_classes[0]
        lowerCamelCase__ : List[Any] =self.get_scheduler_config()
        lowerCamelCase__ : Optional[Any] =scheduler_class(**lowercase_ )
        lowerCamelCase__ : List[Any] =[39, 30, 12, 15, 0]
        with self.assertRaises(lowercase_, msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=lowercase_ )

    # Passing both num_inference_steps and timesteps must be rejected.
    def snake_case ( self : Tuple )-> Tuple:
        lowerCamelCase__ : List[Any] =self.scheduler_classes[0]
        lowerCamelCase__ : Optional[Any] =self.get_scheduler_config()
        lowerCamelCase__ : Optional[Any] =scheduler_class(**lowercase_ )
        lowerCamelCase__ : Union[str, Any] =[39, 30, 12, 1, 0]
        lowerCamelCase__ : Optional[Any] =len(lowercase_ )
        with self.assertRaises(lowercase_, msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=lowercase_, timesteps=lowercase_ )

    # Timesteps at/after num_train_timesteps must be rejected.
    def snake_case ( self : int )-> List[str]:
        lowerCamelCase__ : List[str] =self.scheduler_classes[0]
        lowerCamelCase__ : str =self.get_scheduler_config()
        lowerCamelCase__ : Optional[int] =scheduler_class(**lowercase_ )
        lowerCamelCase__ : Dict =[scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowercase_, msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''', ):
            scheduler.set_timesteps(timesteps=lowercase_ )
| 708 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def snake_case__(model, dirpath):
    """Save ``model`` into ``dirpath``, clearing stale config/weights first.

    (The original signature repeated one parameter name — a SyntaxError —
    so parameters are renamed positionally.)
    """
    # save results
    if os.path.exists(dirpath):
        # Remove stale files so a partial previous run cannot be mixed in.
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def snake_case__(p, unlogit=False):
    """Shannon entropy of ``p`` along the last dimension.

    p: tensor of (pseudo-)probabilities.
    unlogit: if True, square ``p`` first (treats values as unnormalized).

    (The original signature repeated one parameter name — a SyntaxError —
    so parameters are renamed positionally.)
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) is NaN; define it as 0 for entropy purposes.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def snake_case__ ( __lowerCamelCase : Any ):
    """Log a 2-D tensor row by row (floats to 5 decimal places, ints as-is).

    NOTE(review): relies on the module-level `logger` defined at the top of
    this file.
    """
    tensor = __lowerCamelCase
    # Header row: 1-based column indices.
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over ``eval_dataloader`` and accumulate per-head statistics.

    Args:
        args: parsed CLI namespace (reads ``device``, ``local_rank`` and the
            ``dont_normalize_*`` flags).
        model: GPT-2 LM-head model whose attention heads are scored.
        eval_dataloader: dataloader yielding tuples of ``input_ids`` batches.
        compute_entropy: accumulate attention entropy per head.
        compute_importance: accumulate |grad| of the head mask per head.
        head_mask: optional (n_layers, n_heads) mask; a fresh all-ones mask with
            gradients enabled is created when None.
        actually_pruned: set when heads were physically pruned, so no mask is
            passed to the model (its shape would no longer match).

    Returns:
        Tuple ``(attn_entropy, head_importance, total_loss)``.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank 0 is the most important head.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important heads while the score stays above
    ``args.masking_threshold`` times the original score.

    Returns:
        The final (n_layers, n_heads) binary head mask; also saved to
        ``<output_dir>/head_mask.npy``.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the heads zeroed in ``head_mask`` and compare score, size
    and speed before/after pruning; saves the pruned model to ``args.output_dir``.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # A 0 in the mask means "prune this head".
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses a single-head layer to a bare int; normalize to a list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """CLI entry point: compute GPT-2 head entropy/importance over a dataset and
    optionally mask then prune heads, saving the resulting model.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training; the helpers read args.device / args.n_gpu.
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset: one token-id sequence per row of the input text file.
    # (was np.intaa — not a numpy dtype; int64 matches torch.from_numpy/embedding lookups)
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 625 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# The code below reads these by name, so they must not be bound to a placeholder.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
with open(_lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase__ : int =f.readlines()
# Find the start prompt.
lowerCamelCase__ : Dict =0
while not lines[start_index].startswith(_lowercase ):
start_index += 1
start_index += 1
lowerCamelCase__ : int =start_index
while not lines[end_index].startswith(_lowercase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map each task-guide page to the auto-model mapping whose models it documents.
TASK_GUIDE_TO_MODELS = {
    '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    '''summarization.md''': ('''nllb''',),
    '''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return a comma-separated markdown list of model links for ``task_guide``.

    Combines the models from ``TASK_GUIDE_TO_MODELS[task_guide]`` with any extra
    model types declared in ``SPECIAL_TASK_GUIDE_TO_MODEL_TYPES``; ends with a
    newline so it can be spliced back into the doc file verbatim.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    # One markdown link per model, e.g. "[BERT](../model_doc/bert)".
    return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Ensure the auto-generated model list inside ``task_guide`` is up to date.

    Args:
        task_guide: file name of the guide, relative to ``PATH_TO_TASK_GUIDES``.
        overwrite: if True, rewrite the guide in place; otherwise raise on a
            stale list.

    Raises:
        ValueError: when the list is stale and ``overwrite`` is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''',
        end_prompt='''<!--End of the generated tip-->''',
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ''' to fix this.'''
            )
if __name__ == "__main__":
    # CLI: check every known task guide, optionally fixing files in place.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 709 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
    """Load a T5X (T5/LongT5) checkpoint and copy its weights, layer by layer,
    into a freshly initialized Flax seq2seq model, then save that model.

    NOTE(review): this block appears machine-obfuscated — every local is bound
    to the single name ``lowerCamelCase__`` while later lines read the intended
    names (``config``, ``flax_model``, ``tax_model``, ``split_mlp_wi``,
    ``tax_attention_key``, ...), and the parameter list reuses
    ``__lowerCamelCase`` three times, which is a SyntaxError. The original
    variable names and the Flax param-tree assignment targets must be restored
    before this can run; the structure is kept byte-identical below and only
    annotated, since reconstructing the exact param-tree keys is not safe here.
    """
    lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
    lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase )
    # v1.1-style checkpoints split the MLP input projection into wi_0 / wi_1.
    lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    # NOTE(review): the first ``if`` below is not part of the following
    # if/elif/else chain, so for model_type "t5" control still reaches the
    # ``else`` branch and raises — confirm against the upstream converter.
    if config.model_type == "t5":
        lowerCamelCase__ : List[str] ='''SelfAttention'''
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
        # Layer Normalization
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
        if split_mlp_wi:
            lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : int =tax_attention_key
        lowerCamelCase__ : Optional[int] =tax_attention_out
        lowerCamelCase__ : List[Any] =tax_attention_query
        lowerCamelCase__ : Optional[Any] =tax_attention_value
        lowerCamelCase__ : List[str] =tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : Optional[int] =tax_global_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
        else:
            lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
        lowerCamelCase__ : str =tax_mlp_wo
        lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
        lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
    # Only for layer 0:
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : str =tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
    # Assigning
    lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    lowerCamelCase__ : List[Any] =tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]
        # Encoder-Decoder-Attention
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
        lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
        lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
        # MLP
        if split_mlp_wi:
            lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : Union[str, Any] =tax_attention_key
        lowerCamelCase__ : str =tax_attention_out
        lowerCamelCase__ : Optional[int] =tax_attention_query
        lowerCamelCase__ : Dict =tax_attention_value
        lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
        lowerCamelCase__ : Any =tax_enc_dec_attention_out
        lowerCamelCase__ : Any =tax_enc_dec_attention_query
        lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
        lowerCamelCase__ : Dict =tax_cross_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Tuple =tax_mlp_wi_a
            lowerCamelCase__ : int =tax_mlp_wi_a
        else:
            lowerCamelCase__ : List[Any] =tax_mlp_wi
        lowerCamelCase__ : Dict =tax_mlp_wo
        lowerCamelCase__ : Tuple =txa_mlp_layer_norm
        lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
    # Decoder Normalization
    lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    lowerCamelCase__ : int =txa_decoder_norm
    # Only for layer 0:
    lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
    # Token Embeddings
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
    lowerCamelCase__ : Dict =txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(__lowerCamelCase )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # argparse derives the attribute name from the flag, so it is
    # args.t5x_checkpoint_path (the original read a nonexistent args.tax_checkpoint_path).
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs consumed by the demo in the __main__ guard below, which reads
# them as test_data_odd / test_data_even.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """One element of a singly linked list: an int payload plus the next link."""

    data: int
    next_node: "Node | None"
class SortedLinkedList:
    """A singly linked list whose nodes are kept in ascending order.

    The constructor accepts any iterable of ints; iterating the list yields the
    stored values in ascending order.
    """

    def __init__(self, ints: Iterable[int]) -> None:
        # Insert the largest value first so each push-front keeps ascending order.
        self.head = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the node payloads in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Number of nodes in the list."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render the values as 'a -> b -> c'."""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: "SortedLinkedList", sll_two: "SortedLinkedList") -> "SortedLinkedList":
    """Merge two sorted linked lists into a new ascending SortedLinkedList."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Short alias used in the demo expression below.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 710 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds ConvNext/UperNet configs and dummy inputs for the UperNet
    semantic-segmentation model tests; ``parent`` is the unittest case used
    for assertions."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        """ConvNext backbone config matching this tester's dimensions."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        """UperNet config wrapping the ConvNext backbone config."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Forward pass: logits must be (batch, num_labels, H, W)."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for the shared model-tester mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Model-level tests for UperNetForSemanticSegmentation.

    NOTE(review): this file is machine-obfuscated. The base classes
    `lowerCAmelCase_` and many call targets (`UperNetModelTester`,
    `ConfigTester`, `_config_zero_init`, `torch_device`, ...) are undefined
    in the visible scope -- presumably ModelTesterMixin / PipelineTesterMixin
    and the usual transformers test helpers; confirm upstream. All methods
    share the name `snake_case`, so only the last definition survives at
    runtime.
    """
    # Obfuscated class attributes: originally all_model_classes,
    # pipeline_model_mapping, and the usual has-attention/head-masking flags.
    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    # setUp: build the model tester and a ConfigTester.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        # NOTE(review): `config_class=lowerCamelCase` references an undefined
        # name -- originally UperNetConfig / has_text_modality=False.
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
    # Run the standard config round-trip checks.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # Intentionally empty (common-properties hook).
    def snake_case ( self : List[str] )-> Dict:
        return
    # Check the first forward() argument is `pixel_values`.
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )
    # Semantic-segmentation forward-pass shape check.
    def snake_case ( self : Any )-> Union[str, Any]:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass
    # Hidden-states output: count and spatial shape of feature maps.
    def snake_case ( self : Optional[int] )-> List[str]:
        # NOTE(review): the nested def below also has duplicate parameter
        # names (obfuscation damage) -- originally (config, inputs_dict,
        # model_class).
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
    # Initialization check: with a zero-init config every param mean is 0 or 1.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass
    # Slow test: the pretrained checkpoint loads.
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    """Download the ADE20k fixture image and return it as an RGB PIL image.

    Fix: the obfuscated original stored the downloaded path in a throwaway
    local but opened an undefined name; the function is also renamed to
    ``prepare_img``, which is how the integration tests below call it.
    """
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath).convert('''RGB''')
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests: UperNet logits against known values.

    NOTE(review): obfuscation damage -- both methods are named `snake_case`
    (only the last survives), and the `.to(lowerCamelCase )` calls reference
    an undefined name (originally `torch_device`); confirm upstream.
    """
    # Swin-tiny backbone checkpoint.
    def snake_case ( self : str )-> Union[str, Any]:
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        # Logits are (batch, num_labels, H, W) at the 512x512 input size.
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
    # ConvNext-tiny backbone checkpoint.
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): the next assignment reuses the same obfuscated name and
# clobbers the logger handle above -- the originals were presumably `logger`
# and `TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`.
_lowercase : str = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for a Trajectory Transformer model.

    Defaults reproduce the halfcheetah-medium-v2 checkpoint. Fixes applied:
    the base class referenced an undefined obfuscated name (the only config
    base imported at the top of this file is ``PretrainedConfig``); the three
    class attributes were all assigned to ``_a`` so only the last survived
    (restored per the ``PretrainedConfig`` convention); and ``__init__``
    declared every parameter as ``lowerCamelCase`` (duplicate argument
    names, a SyntaxError). Parameter names are reconstructed from the
    attribute names assigned in the body; positional order and defaults are
    unchanged.
    """

    model_type = """trajectory_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0_006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 711 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """Placeholder raising an informative error when `onnx` is not installed.

    Fixes: the metaclass referenced an undefined obfuscated name instead of
    the ``DummyObject`` imported at the top of this file; the backends list
    attribute is restored to ``_backends`` (the name ``requires_backends``
    machinery reads); and the two classmethods were both named ``snake_case``
    so the first was shadowed -- restored to the standard dummy-object pair
    ``from_config`` / ``from_pretrained`` (confirm against upstream).
    """

    _backends = ['onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''onnx'''])
| 625 | 0 |
"""Demonstration of classic fuzzy-set operations (union, intersection,
complement, differences, algebraic/bounded sums and products) on two
triangular membership functions, plotted with matplotlib.

NOTE(review): obfuscation damage -- every module constant was renamed to
`_lowercase` (each assignment clobbers the previous one) and the trimf
calls reference undefined names `X` and `abca`; originally the universe and
the two membership triples. Confirm against the python-algorithms original.
"""
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    _lowercase : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    _lowercase : str = [0, 2_5, 5_0]
    _lowercase : int = [2_5, 5_0, 7_5]
    # NOTE(review): both calls pass the same (undefined) triple `abca`; the
    # second presumably used the [25, 50, 75] triple for the middle-aged set.
    _lowercase : int = fuzz.membership.trimf(X, abca)
    _lowercase : str = fuzz.membership.trimf(X, abca)
    # Compute the different operations using inbuilt functions.
    _lowercase : Tuple = np.ones(7_5)
    _lowercase : Optional[int] = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    _lowercase : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    _lowercase : Tuple = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    _lowercase : int = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    _lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    _lowercase : Union[str, Any] = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    _lowercase : Optional[int] = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    _lowercase : Any = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    _lowercase : List[str] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)
    plt.subplot(4, 3, 1_0)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 712 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of c = x + y*i under z -> z**2 + c.

    0.0 means immediate divergence, 1.0 means no divergence within
    ``max_step`` iterations (the point is taken to be in the Mandelbrot set).

    Fix: the obfuscated original declared all three parameters as
    ``__lowerCamelCase`` (duplicate argument names, a SyntaxError) while the
    body read ``x``, ``y`` and ``max_step``; the function is renamed to
    ``get_distance``, the name ``get_image`` below calls it by.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Map a normalized distance to black (inside the set) or white.

    Fix: the obfuscated parameter name did not match the ``distance`` read in
    the body (NameError); renamed to ``get_black_and_white_rgb``, the name
    ``get_image`` calls it by.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Map a normalized distance to an RGB color via the HSV hue wheel.

    Points inside the set (distance == 1) are black; others get a hue
    proportional to their escape distance.

    Fix: the obfuscated parameter name did not match the ``distance`` read in
    the body (NameError); renamed to ``get_color_coded_rgb``, the name
    ``get_image`` calls it by.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a PIL RGB image and return it.

    Fix: the obfuscated original declared every parameter as
    ``__lowerCamelCase`` (duplicate argument names, a SyntaxError) and wrote
    pixels into a throwaway local. Parameter names are reconstructed from
    the names read in the body and in the commented call examples below.
    The loop-invariant figure height is also hoisted out of the pixel loop.
    """
    img = Image.new('''RGB''' , (image_width, image_height))
    pixels = img.load()
    # determine the figure height once -- it only depends on the arguments
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # Fix: the rendered image was assigned to a throwaway obfuscated name,
    # so the `img.show()` call below hit a NameError.
    img = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 625 | 0 |
"""Pure-Python implementation of the Adler-32 checksum (RFC 1950)."""
# Largest prime smaller than 2**16 -- the Adler-32 modulus.
# Fix: the body read MOD_ADLER, but the obfuscated constant was named
# `_lowercase`; the alias is kept for backward compatibility.
MOD_ADLER = 6_5_5_2_1
_lowercase = MOD_ADLER


def snake_case__(plain_text: str) -> int:
    """Return the Adler-32 checksum of *plain_text*.

    Matches ``zlib.adler32`` on ASCII/latin-1 text: the low word is the
    running byte sum (seeded with 1), the high word is the sum of sums.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        # both running sums are reduced modulo MOD_ADLER at every step
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 713 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig matching *model_name* (size + label set).

    Fix: restored the name ``get_videomae_config`` (the conversion entry
    point calls it by that name) and the parameter/local names the body
    reads, which the obfuscation had replaced with undefined identifiers.
    """
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        # NOTE(review): attribute reconstructed from the upstream conversion
        # script (pre-training checkpoints disable mean pooling) -- confirm.
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = '''huggingface/label-files'''
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = '''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = '''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''')
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on *config* from the size tag in *model_name*.

    Recognized tags: "small", "large", "huge"; "base" keeps the config's
    defaults. Raises ValueError for any other name.

    Fix: the obfuscated original declared both parameters as
    ``__lowerCamelCase`` (duplicate argument names, a SyntaxError) and lost
    the config attribute targets; names are reconstructed from the upstream
    VideoMAE conversion script -- confirm attribute names there.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('''Model name should include either "small", "base", "large", or "huge"''')
def rename_key(name):
    """Translate an original VideoMAE checkpoint key into a HF model key.

    Fix: the obfuscated parameter name did not match the ``name`` the body
    reads and re-assigns (NameError). Renamed to ``rename_key`` per the
    upstream conversion script (confirm -- no call site is visible here).
    The replacement order below is significant and is preserved exactly.
    """
    if "encoder." in name:
        name = name.replace('''encoder.''' , '''''')
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''')
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''')
    if "decoder.blocks" in name:
        name = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''')
    if "blocks" in name:
        name = name.replace('''blocks''' , '''videomae.encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''')
    if "attn" in name and "bias" not in name:
        name = name.replace('''attn''' , '''attention.self''')
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.attention''')
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''')
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''')
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''')
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('''norm.weight''' , '''videomae.layernorm.weight''')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('''norm.bias''' , '''videomae.layernorm.bias''')
    if "head" in name and "decoder" not in name:
        name = name.replace('''head''' , '''classifier''')
    return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
    """Split fused qkv checkpoint weights and remap keys into the HF layout.

    NOTE(review): obfuscation damage -- both parameters are named
    ``__lowerCamelCase`` (a SyntaxError; originally the state dict and the
    config) and every assignment target was replaced with a throwaway local,
    so the remapped entries (originally written back into the state dict)
    are lost. Reconstruct against the upstream VideoMAE conversion script.
    """
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
        if key.startswith('''encoder.''' ):
            # strip the pre-training "encoder." prefix
            lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            lowerCamelCase__ : Any =key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                # decoder layers: slice the fused qkv into query/key/value
                lowerCamelCase__ : Tuple =config.decoder_hidden_size
                lowerCamelCase__ : str =int(key_split[2] )
                lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
                if "weight" in key:
                    lowerCamelCase__ : List[Any] =val[:dim, :]
                    lowerCamelCase__ : Any =val[dim : dim * 2, :]
                    lowerCamelCase__ : Dict =val[-dim:, :]
            else:
                # encoder layers: same slicing with the encoder hidden size
                lowerCamelCase__ : Optional[Any] =config.hidden_size
                lowerCamelCase__ : Optional[Any] =int(key_split[1] )
                lowerCamelCase__ : str ='''videomae.encoder.layer.'''
                if "weight" in key:
                    lowerCamelCase__ : int =val[:dim, :]
                    lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
                    lowerCamelCase__ : List[Any] =val[-dim:, :]
        else:
            # non-qkv keys: presumably stored under rename_key(key)
            lowerCamelCase__ : int =val
    return orig_state_dict
def prepare_video():
    """Download the spaghetti-eating fixture video and return it as a list of frames.

    Fix: the obfuscated original stored the downloaded path in a throwaway
    local but loaded an undefined name; also renamed to ``prepare_video``,
    which is how the conversion entry point calls it.
    """
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint to the HF format and verify it.

    Fix: the obfuscated original declared all four parameters as
    ``__lowerCamelCase`` (duplicate argument names, a SyntaxError); names
    and order are reconstructed from the argparse call at the bottom of the
    file. Local names are restored from the reads in the body.
    """
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = '''pytorch_model.bin'''
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location='''cpu''')
    if "model" in files:
        state_dict = files['''model''']
    else:
        state_dict = files['''module''']
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors='''pt''')
    if "finetuned" not in model_name:
        # pre-training models additionally need a boolean mask of patches
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''')
        inputs['''bool_masked_pos'''] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.92_91, -0.40_61, -0.93_07])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.26_71, -0.46_89, -0.82_35])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.51_42]) if config.norm_pix_loss else torch.tensor([0.64_69])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.07_71, 0.00_11, -0.36_25])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.24_33, 0.16_32, -0.48_94])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.65_88, 0.09_90, -0.24_93])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.05_37, -0.15_39, -0.32_66])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.19_61, -0.83_37, -0.63_89])
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''')
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print('''Logits:''', logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print('''Logits ok!''')
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print('''Loss ok!''')
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('''Pushing to the hub...''')
        model.push_to_hub(model_name, organization='''nielsr''')
if __name__ == "__main__":
    # Fix: the parsed namespace was assigned to a throwaway obfuscated name,
    # so the `args.*` reads in the final call hit a NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
# Test fixtures: source snippets whose only top-level/handled import of a
# real module is `os`. Fix: all ten constants were assigned to the single
# obfuscated name `_lowercase`, while the CASES list below referenced their
# intended names (NameError at import). Names restored from that list; the
# snippet strings are unchanged.
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]
# Backward-compatible alias for the obfuscated module-level name.
_lowercase = CASES
@pytest.mark.parametrize("case", CASES)
def snake_case__(tmp_path, case):
    """get_imports must report exactly ["os"] for every snippet in CASES.

    Fix: the obfuscated original declared both parameters as
    ``__lowerCamelCase`` (duplicate argument names, a SyntaxError) and
    parametrized over an undefined name. Parameters restored to pytest's
    ``tmp_path`` fixture plus the parametrized ``case`` snippet.
    """
    tmp_file_path = os.path.join(tmp_path, '''test_file.py''')
    with open(tmp_file_path, '''w''') as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 714 |
"""simple docstring"""
# Flag color codes. Fix: all four constants were assigned to the single
# obfuscated name `_lowercase`, so the tuple below referenced undefined
# names; restored from the original comments. The alias keeps the last
# obfuscated binding intact.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
_lowercase = colors
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values 0, 1 and 2 in one pass (in place).

    Implements Dijkstra's Dutch national flag partitioning: three pointers
    (`low`, `mid`, `high`) keep 0s left, 1s in the middle and 2s right.

    :param sequence: list whose elements are all in ``(0, 1, 2)``
    :return: the same list, sorted
    :raises ValueError: if an element outside ``(0, 1, 2)`` is encountered
    """
    # Local copy of the allowed values so the function is self-contained.
    colors = (0, 1, 2)
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 belongs at the front: swap into the low region.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 belongs at the back: swap into the high region; do not advance
            # mid, because the swapped-in element is still unclassified.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


# Backward-compatible alias for the previous (mangled) public name.
snake_case__ = dutch_national_flag_sort
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip()
_lowercase : int = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 625 | 0 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger, used by the config classes below for warnings/info messages.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL.
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    """Configuration for the text tower of the ALIGN model (BERT-like encoder)."""

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the text config, unwrapping it from a composite `align` config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    """Configuration for the vision tower of the ALIGN model (EfficientNet-style encoder)."""

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Derived: each block repeat expands to 4 hidden layers in the encoder.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision config, unwrapping it from a composite `align` config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    """Composite configuration holding the ALIGN text and vision sub-configs."""

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """Instantiate an `AlignConfig` from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 715 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Enable deterministic execution (diffusers test helper) so the pixel-level
# comparisons in the tests below are reproducible across runs.
enable_full_determinism()
class StableUnCLIPImgaImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the stable unCLIP image-to-image pipeline with tiny dummy components."""

    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a dict of tiny, seeded sub-models matching the pipeline's constructor."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # Class embeddings are the noise-augmented image embeddings concatenated
            # with the noise level embeddings of the same dimension.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00_085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Create a seeded pipeline-call kwargs dict; optionally converts the image to PIL."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            # Map from [-1, 1] to [0, 1] before the PIL round-trip.
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        """Pipeline must run when `image_embeds` is explicitly None and match the golden slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        # Exact comparison is only stable on cpu/mps.
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        # Exact comparison is only stable on cpu/mps.
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests comparing full pipelines against reference outputs."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_lowercase : str = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 716 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding ``n``.

    Project Euler problem 2; ``solution()`` with the default limit is 4613732.

    :param n: inclusive upper bound for Fibonacci terms considered
    :return: sum of the even terms ``<= n``
    """
    total = 0
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total


# Backward-compatible alias for the previous (mangled) public name.
snake_case__ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """Return a dataclass ``field`` whose default is produced by a factory.

    Note: the factory returns the *same* ``default`` object on every call; this
    mirrors the original behavior and is fine for the test fixtures below.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    """Fixture dataclass: four required fields of basic types."""

    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """Fixture dataclass: every field has a default; `baz` carries help metadata."""

    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    """Fixture dataclass: boolean fields with defaults plus an optional boolean."""

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """Fixture enum with string values only."""

    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    """Fixture enum mixing string and int values (exercises choice-type coercion)."""

    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    """Fixture dataclass whose field is normalized to a `BasicEnum` member after init."""

    foo: BasicEnum = "toto"

    def __post_init__(self):
        # Accept the raw string default/value and coerce it to the enum member.
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """Fixture dataclass whose field is normalized to a `MixedTypeEnum` member after init."""

    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        # Accept the raw string/int value and coerce it to the enum member.
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """Fixture dataclass where every field is optional, including list fields."""

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """Fixture dataclass with list fields of int, str and float element types."""

    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """Fixture dataclass where every field is required (bare `field()` = no default)."""

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        # Coerce the raw value to the enum member.
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """Fixture dataclass using string (forward-reference) type annotations."""

    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        """PEP 604 (`bool | None`) variant of `WithDefaultBoolExample` (Python >= 3.10 only)."""

        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        """PEP 604 variant of `OptionalExample` (Python >= 3.10 only)."""

        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int] )-> Any:
self.assertEqual(len(a._actions ), len(b._actions ) )
for x, y in zip(a._actions, b._actions ):
lowerCamelCase__ : Tuple ={k: v for k, v in vars(lowerCamelCase ).items() if k != '''container'''}
lowerCamelCase__ : Dict ={k: v for k, v in vars(lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''', lowerCamelCase ) and yy.get('''choices''', lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](lowerCamelCase ), yy['''type'''](lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> str:
lowerCamelCase__ : Any =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Any =argparse.ArgumentParser()
expected.add_argument('''--foo''', type=lowerCamelCase, required=lowerCamelCase )
expected.add_argument('''--bar''', type=lowerCamelCase, required=lowerCamelCase )
expected.add_argument('''--baz''', type=lowerCamelCase, required=lowerCamelCase )
expected.add_argument('''--flag''', type=lowerCamelCase, default=lowerCamelCase, const=lowerCamelCase, nargs='''?''' )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : int =['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((lowerCamelCase__ ) , ) : List[str] =parser.parse_args_into_dataclasses(lowerCamelCase, look_for_args_file=lowerCamelCase )
self.assertFalse(example.flag )
def snake_case ( self : Union[str, Any] )-> Dict:
lowerCamelCase__ : List[str] =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : str =argparse.ArgumentParser()
expected.add_argument('''--foo''', default=42, type=lowerCamelCase )
expected.add_argument('''--baz''', default='''toto''', type=lowerCamelCase, help='''help message''' )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : Dict =argparse.ArgumentParser()
expected.add_argument('''--foo''', type=lowerCamelCase, default=lowerCamelCase, const=lowerCamelCase, nargs='''?''' )
expected.add_argument('''--baz''', type=lowerCamelCase, default=lowerCamelCase, const=lowerCamelCase, nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''', action='''store_false''', default=lowerCamelCase, dest='''baz''' )
expected.add_argument('''--opt''', type=lowerCamelCase, default=lowerCamelCase )
lowerCamelCase__ : Dict =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase )
for dataclass_type in dataclass_types:
lowerCamelCase__ : Optional[Any] =HfArgumentParser(lowerCamelCase )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : int =parser.parse_args([] )
self.assertEqual(lowerCamelCase, Namespace(foo=lowerCamelCase, baz=lowerCamelCase, opt=lowerCamelCase ) )
lowerCamelCase__ : List[str] =parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowerCamelCase, Namespace(foo=lowerCamelCase, baz=lowerCamelCase, opt=lowerCamelCase ) )
lowerCamelCase__ : Union[str, Any] =parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowerCamelCase, Namespace(foo=lowerCamelCase, baz=lowerCamelCase, opt=lowerCamelCase ) )
lowerCamelCase__ : Tuple =parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowerCamelCase, Namespace(foo=lowerCamelCase, baz=lowerCamelCase, opt=lowerCamelCase ) )
lowerCamelCase__ : Dict =parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowerCamelCase, Namespace(foo=lowerCamelCase, baz=lowerCamelCase, opt=lowerCamelCase ) )
def snake_case ( self : Union[str, Any] )-> Tuple:
lowerCamelCase__ : Optional[Any] =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Any =argparse.ArgumentParser()
expected.add_argument(
'''--foo''', default='''toto''', choices=['''titi''', '''toto''', 42], type=make_choice_type_function(['''titi''', '''toto''', 42] ), )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[int] =parser.parse_args([] )
self.assertEqual(args.foo, '''toto''' )
lowerCamelCase__ : Union[str, Any] =parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.toto )
lowerCamelCase__ : Optional[int] =parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo, '''titi''' )
lowerCamelCase__ : Optional[int] =parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.titi )
lowerCamelCase__ : str =parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo, 42 )
lowerCamelCase__ : int =parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo )
def snake_case ( self : Optional[int] )-> List[str]:
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
_a = "toto"
lowerCamelCase__ : Tuple =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Any =argparse.ArgumentParser()
expected.add_argument(
'''--foo''', default='''toto''', choices=('''titi''', '''toto''', 42), type=make_choice_type_function(['''titi''', '''toto''', 42] ), )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =parser.parse_args([] )
self.assertEqual(args.foo, '''toto''' )
lowerCamelCase__ : Optional[Any] =parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo, '''titi''' )
lowerCamelCase__ : List[str] =parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo, 42 )
def snake_case ( self : str )-> str:
lowerCamelCase__ : Tuple =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Optional[int] =argparse.ArgumentParser()
expected.add_argument('''--foo_int''', nargs='''+''', default=[], type=lowerCamelCase )
expected.add_argument('''--bar_int''', nargs='''+''', default=[1, 2, 3], type=lowerCamelCase )
expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=lowerCamelCase )
expected.add_argument('''--foo_float''', nargs='''+''', default=[0.1, 0.2, 0.3], type=lowerCamelCase )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[Any] =parser.parse_args([] )
self.assertEqual(
lowerCamelCase, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['''Hallo''', '''Bonjour''', '''Hello'''], foo_float=[0.1, 0.2, 0.3] ), )
lowerCamelCase__ : Union[str, Any] =parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowerCamelCase, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['''a''', '''b''', '''c'''], foo_float=[0.1, 0.7] ) )
def snake_case ( self : Any )-> Optional[int]:
lowerCamelCase__ : Dict =argparse.ArgumentParser()
expected.add_argument('''--foo''', default=lowerCamelCase, type=lowerCamelCase )
expected.add_argument('''--bar''', default=lowerCamelCase, type=lowerCamelCase, help='''help message''' )
expected.add_argument('''--baz''', default=lowerCamelCase, type=lowerCamelCase )
expected.add_argument('''--ces''', nargs='''+''', default=[], type=lowerCamelCase )
expected.add_argument('''--des''', nargs='''+''', default=[], type=lowerCamelCase )
lowerCamelCase__ : str =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase )
for dataclass_type in dataclass_types:
lowerCamelCase__ : Optional[Any] =HfArgumentParser(lowerCamelCase )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any =parser.parse_args([] )
self.assertEqual(lowerCamelCase, Namespace(foo=lowerCamelCase, bar=lowerCamelCase, baz=lowerCamelCase, ces=[], des=[] ) )
lowerCamelCase__ : Optional[Any] =parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowerCamelCase, Namespace(foo=12, bar=3.14, baz='''42''', ces=['''a''', '''b''', '''c'''], des=[1, 2, 3] ) )
def snake_case ( self : List[Any] )-> Any:
lowerCamelCase__ : Optional[int] =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Any =argparse.ArgumentParser()
expected.add_argument('''--required_list''', nargs='''+''', type=lowerCamelCase, required=lowerCamelCase )
expected.add_argument('''--required_str''', type=lowerCamelCase, required=lowerCamelCase )
expected.add_argument(
'''--required_enum''', type=make_choice_type_function(['''titi''', '''toto'''] ), choices=['''titi''', '''toto'''], required=lowerCamelCase, )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
lowerCamelCase__ : List[Any] =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : List[str] =argparse.ArgumentParser()
expected.add_argument('''--foo''', type=lowerCamelCase, required=lowerCamelCase )
expected.add_argument(
'''--required_enum''', type=make_choice_type_function(['''titi''', '''toto'''] ), choices=['''titi''', '''toto'''], required=lowerCamelCase, )
expected.add_argument('''--opt''', type=lowerCamelCase, default=lowerCamelCase )
expected.add_argument('''--baz''', default='''toto''', type=lowerCamelCase, help='''help message''' )
expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=lowerCamelCase )
self.argparsersEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[str] )-> Union[str, Any]:
lowerCamelCase__ : Dict =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Any ={
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
lowerCamelCase__ : Optional[Any] =parser.parse_dict(lowerCamelCase )[0]
lowerCamelCase__ : Tuple =BasicExample(**lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> List[Any]:
lowerCamelCase__ : Any =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : Tuple ={
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowerCamelCase, parser.parse_dict, lowerCamelCase, allow_extra_keys=lowerCamelCase )
def snake_case ( self : List[Any] )-> List[str]:
lowerCamelCase__ : Optional[Any] =HfArgumentParser(lowerCamelCase )
lowerCamelCase__ : int ={
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : int =os.path.join(lowerCamelCase, '''temp_json''' )
os.mkdir(lowerCamelCase )
with open(temp_local_path + '''.json''', '''w+''' ) as f:
json.dump(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Dict =parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
lowerCamelCase__ : Optional[Any] =BasicExample(**lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : List[Any] )-> Tuple:
        """Round-trip a dataclass through a YAML file and ``parse_yaml_file``.

        NOTE(review): names are machine-mangled (``lowerCamelCase`` unbound,
        ``temp_local_path``/``args_dict`` read but never assigned); structure
        mirrors the JSON test above.  Code kept byte-identical.
        """
        lowerCamelCase__ : List[str] =HfArgumentParser(lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ : Dict =os.path.join(lowerCamelCase, '''temp_yaml''' )
            os.mkdir(lowerCamelCase )
            with open(temp_local_path + '''.yaml''', '''w+''' ) as f:
                yaml.dump(lowerCamelCase, lowerCamelCase )
            # parse_yaml_file is correct here: the file was written with yaml.dump.
            lowerCamelCase__ : List[Any] =parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            lowerCamelCase__ : Union[str, Any] =BasicExample(**lowerCamelCase )
            self.assertEqual(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> Any:
        # Smoke test: constructing the parser must not raise and not yield None.
        # NOTE(review): `lowerCamelCase` is unbound here (mangled source); kept as-is.
        lowerCamelCase__ : Dict =HfArgumentParser(lowerCamelCase )
        self.assertIsNotNone(lowerCamelCase )
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    '''Builds a tiny BlenderbotSmall config plus TF model inputs for the tests below.

    NOTE(review): this file looks machine-mangled -- every ``__init__`` parameter
    is named ``lowerCamelCase`` (a duplicate-argument SyntaxError) and the body
    reads names (``parent``, ``batch_size``, ...) that are never bound.  Code is
    kept byte-identical; only comments/docstrings were changed.
    '''
    # Config class and per-test config overrides consumed by the second method.
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'
    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        # Store the (intended) hyper-parameters of the tiny test model.
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id
    def snake_case ( self : Any )-> Any:
        # Build random input ids, force an EOS column, and assemble config + inputs.
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict
    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        # Verify that decoding with a KV-cache matches a full forward pass.
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        # Keep only the first batch element to speed the check up.
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in any missing masks for a TFBlenderbotSmall forward pass.

    Fixes the mangled original, which named all eight parameters
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError) and was defined as
    ``snake_case__`` even though the tester above calls
    ``prepare_blenderbot_small_inputs_dict``.  ``tf.inta`` is restored to
    ``tf.int8``.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder token (decoder_start_token) may equal pad, so it is
        # always attended to; the rest follows the non-pad rule.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Common-mixin test suite for the TF BlenderbotSmall models.

    NOTE(review): mangled source -- ``lowerCAmelCase_`` base classes and the
    ``lowerCamelCase`` argument below are unbound.  Code kept byte-identical.
    '''
    # Model classes exercised by the shared mixins / pipeline tests.
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _a = True
    _a = False
    _a = False
    def snake_case ( self : Any )-> str:
        # Wire up the model tester and the generic config tester.
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )
    def snake_case ( self : Any )-> Optional[int]:
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()
    def snake_case ( self : int )-> str:
        # Delegate the cached-decoding equivalence check to the model tester.
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test: run 90M BlenderbotSmall generation on one prompt.'''
    # Conversation-style source text fed to the model.
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'
    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def snake_case ( self : int )-> List[Any]:
        # Lazily download and cache the pretrained seq2seq model.
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def snake_case ( self : Tuple )-> int:
        # Generate with beam search and accept any of the known-good decodings.
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 0 |
"""simple docstring"""
from math import isclose, sqrt
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =point_y / 4 / point_x
lowerCamelCase__ : List[str] =2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase__ : List[Any] =(1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase__ : Union[str, Any] =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase__ : Tuple =outgoing_gradient**2 + 4
lowerCamelCase__ : Optional[int] =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase__ : Optional[int] =(point_y - outgoing_gradient * point_x) ** 2 - 100
lowerCamelCase__ : str =(
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase__ : Optional[int] =(
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase__ : List[str] =x_minus if isclose(__lowerCAmelCase , __lowerCAmelCase ) else x_plus
lowerCamelCase__ : Tuple =point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case__ ( __lowerCamelCase : List[Any] = 1.4 , __lowerCamelCase : int = -9.6 ):
"""simple docstring"""
lowerCamelCase__ : int =0
lowerCamelCase__ : float =first_x_coord
lowerCamelCase__ : float =first_y_coord
lowerCamelCase__ : float =(10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCamelCase__ : str =next_point(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'{solution() = }')
| 718 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ):
"""simple docstring"""
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ):
"""simple docstring"""
# Base Case
if curr_ind == len(__lowerCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__lowerCamelCase ) ):
if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# Insert current vertex into path as next transition
lowerCamelCase__ : Tuple =next_ver
# Validate created path
if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ):
return True
# Backtrack
lowerCamelCase__ : int =-1
return False
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ):
"""simple docstring"""
lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1)
# initialize start and end of path with starting index
lowerCamelCase__ : Union[str, Any] =start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
| 625 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __SCREAMING_SNAKE_CASE :
    '''CLIP pre-processor whose image transforms keep the autograd graph intact.

    NOTE(review): mangled source -- ``lowerCAmelCase_`` in the bodies is unbound
    and the attribute targets were renamed to ``lowerCamelCase__``; the original
    presumably stored ``self.tokenizer``, ``self.normalize`` etc.  Code kept
    byte-identical; only comments were added.
    '''
    def __init__( self : Union[str, Any], lowerCamelCase : List[Any] = "cpu", lowerCamelCase : List[str] = "openai/clip-vit-large-patch14" )-> None:
        # CLIP's published channel means/stds, then differentiable torchvision transforms.
        lowerCamelCase__ : Optional[int] =device
        lowerCamelCase__ : Optional[Any] =CLIPTokenizerFast.from_pretrained(lowerCAmelCase_ )
        lowerCamelCase__ : int =[0.48_145_466, 0.4_578_275, 0.40_821_073]
        lowerCamelCase__ : List[Any] =[0.26_862_954, 0.26_130_258, 0.27_577_711]
        lowerCamelCase__ : Optional[Any] =torchvision.transforms.Normalize(self.image_mean, self.image_std )
        lowerCamelCase__ : Optional[Any] =torchvision.transforms.Resize(224 )
        lowerCamelCase__ : str =torchvision.transforms.CenterCrop(224 )
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int] )-> Optional[Any]:
        # Resize -> center-crop -> normalize, all differentiable.
        lowerCamelCase__ : Tuple =self.resize(lowerCAmelCase_ )
        lowerCamelCase__ : Any =self.center_crop(lowerCAmelCase_ )
        lowerCamelCase__ : str =self.normalize(lowerCAmelCase_ )
        return images
    def __call__( self : str, lowerCamelCase : Any=None, lowerCamelCase : Optional[int]=None, **lowerCamelCase : str )-> Optional[Any]:
        # Tokenize text, preprocess images, and move every tensor to the target device.
        lowerCamelCase__ : Optional[int] =self.tokenizer(text=lowerCAmelCase_, **lowerCAmelCase_ )
        lowerCamelCase__ : Optional[Any] =self.preprocess_img(lowerCAmelCase_ )
        lowerCamelCase__ : List[Any] ={key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    '''VQGAN + CLIP image editor: optimizes a latent offset so the decoded image
    matches positive text prompts and avoids negative ones.

    NOTE(review): mangled source -- ``lowerCAmelCase_`` is unbound throughout and
    assignment targets were renamed to ``lowerCamelCase__``; attribute names read
    later (``self.latent``, ``self.lr``, ``self.save_path``, ...) reveal the
    intended bindings.  Code kept byte-identical; only comments were added.
    '''
    def __init__( self : List[str], lowerCamelCase : Any=10, lowerCamelCase : str=0.01, lowerCamelCase : Optional[int]=None, lowerCamelCase : Dict=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Tuple=None, lowerCamelCase : Any=None, lowerCamelCase : Dict=None, lowerCamelCase : List[Any]=False, lowerCamelCase : List[str]=True, lowerCamelCase : Dict="image", lowerCamelCase : Dict=True, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : int=False, lowerCamelCase : Any=False, )-> None:
        super().__init__()
        # Load (or accept) the VQGAN decoder and the CLIP scorer, both frozen.
        lowerCamelCase__ : str =None
        lowerCamelCase__ : Union[str, Any] =device if device else get_device()
        if vqgan:
            lowerCamelCase__ : Dict =vqgan
        else:
            lowerCamelCase__ : Dict =load_vqgan(self.device, conf_path=lowerCAmelCase_, ckpt_path=lowerCAmelCase_ )
        self.vqgan.eval()
        if clip:
            lowerCamelCase__ : Any =clip
        else:
            lowerCamelCase__ : Any =CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        lowerCamelCase__ : List[str] =ProcessorGradientFlow(device=self.device )
        # Optimization hyper-parameters and output options.
        lowerCamelCase__ : str =iterations
        lowerCamelCase__ : int =lr
        lowerCamelCase__ : Tuple =log
        lowerCamelCase__ : Optional[int] =make_grid
        lowerCamelCase__ : Tuple =return_val
        lowerCamelCase__ : List[str] =quantize
        lowerCamelCase__ : str =self.vqgan.decoder.z_shape
    def snake_case ( self : Union[str, Any], lowerCamelCase : Optional[int]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Dict=5, lowerCamelCase : Dict=True )-> Any:
        # Stitch the saved intermediate frames into an animated GIF.
        lowerCamelCase__ : Tuple =[]
        if output_path is None:
            lowerCamelCase__ : Optional[int] ='''./animation.gif'''
        if input_path is None:
            lowerCamelCase__ : int =self.save_path
        lowerCamelCase__ : Optional[Any] =sorted(glob(input_path + '''/*''' ) )
        if not len(lowerCAmelCase_ ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(lowerCAmelCase_ ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        lowerCamelCase__ : Any =total_duration / len(lowerCAmelCase_ )
        lowerCamelCase__ : List[Any] =[frame_duration] * len(lowerCAmelCase_ )
        if extend_frames:
            # Hold the first and last frames longer for a nicer loop.
            lowerCamelCase__ : Tuple =1.5
            lowerCamelCase__ : Dict =3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(lowerCAmelCase_ ) )
        imageio.mimsave(lowerCAmelCase_, lowerCAmelCase_, duration=lowerCAmelCase_ )
        print(F'''gif saved to {output_path}''' )
    def snake_case ( self : Optional[Any], lowerCamelCase : List[Any]=None, lowerCamelCase : int=None )-> str:
        # Encode an image file into the VQGAN latent space (tensor input: TODO).
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        lowerCamelCase__ : Dict =preprocess(Image.open(lowerCAmelCase_ ), target_image_size=256 ).to(self.device )
        lowerCamelCase__ : Any =preprocess_vqgan(lowerCAmelCase_ )
        lowerCamelCase__ , *lowerCamelCase__ : Union[str, Any] =self.vqgan.encode(lowerCAmelCase_ )
        return z
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int] )-> Union[str, Any]:
        # Decode (latent + offset vector), optionally through the VQ quantizer.
        lowerCamelCase__ : Union[str, Any] =self.latent.detach().requires_grad_()
        lowerCamelCase__ : List[Any] =base_latent + transform_vector
        if self.quantize:
            lowerCamelCase__ , *lowerCamelCase__ : str =self.vqgan.quantize(lowerCAmelCase_ )
        else:
            lowerCamelCase__ : Any =trans_latent
        return self.vqgan.decode(lowerCAmelCase_ )
    def snake_case ( self : str, lowerCamelCase : List[str], lowerCamelCase : Any, lowerCamelCase : Optional[int]=None )-> Tuple:
        # Sum of (optionally weighted) CLIP image-text similarity logits.
        lowerCamelCase__ : Union[str, Any] =self.clip_preprocessor(text=lowerCAmelCase_, images=lowerCAmelCase_, return_tensors='''pt''', padding=lowerCAmelCase_ )
        lowerCamelCase__ : int =self.clip(**lowerCAmelCase_ )
        lowerCamelCase__ : Optional[Any] =clip_outputs.logits_per_image
        if weights is not None:
            lowerCamelCase__ : Optional[int] =similarity_logits * weights
        return similarity_logits.sum()
    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : Any )-> int:
        # Contrastive loss: -log(sim to positives) + log(sim to negatives).
        lowerCamelCase__ : Tuple =self._get_clip_similarity(pos_prompts['''prompts'''], lowerCAmelCase_, weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            lowerCamelCase__ : Tuple =self._get_clip_similarity(neg_prompts['''prompts'''], lowerCAmelCase_, weights=neg_prompts['''weights'''] )
        else:
            lowerCamelCase__ : List[Any] =torch.tensor([1], device=self.device )
        lowerCamelCase__ : Any =-torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ )
        return loss
    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : List[Any], lowerCamelCase : List[str] )-> Any:
        # Gradient-descend a latent offset vector; yield an image (or the vector)
        # after each optimizer step.
        lowerCamelCase__ : Dict =torch.randn_like(self.latent, requires_grad=lowerCAmelCase_, device=self.device )
        lowerCamelCase__ : Optional[int] =torch.optim.Adam([vector], lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            lowerCamelCase__ : Union[str, Any] =self._add_vector(lowerCAmelCase_ )
            lowerCamelCase__ : List[Any] =loop_post_process(lowerCAmelCase_ )
            lowerCamelCase__ : List[Any] =self._get_CLIP_loss(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
            print('''CLIP loss''', lowerCAmelCase_ )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=lowerCAmelCase_ )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def snake_case ( self : Dict, lowerCamelCase : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : List[str] )-> List[str]:
        # Start a wandb run and record prompts/hyper-parameters.
        # NOTE(review): wandb.log below is called positionally; wandb.log expects
        # a dict ({'Original Image': wandb.Image(...)}) -- confirm upstream.
        wandb.init(reinit=lowerCAmelCase_, project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            lowerCamelCase__ : Dict =Image.open(lowerCAmelCase_ )
            lowerCamelCase__ : str =image.resize((256, 256) )
            wandb.log('''Original Image''', wandb.Image(lowerCAmelCase_ ) )
    def snake_case ( self : Dict, lowerCamelCase : Dict )-> Union[str, Any]:
        # Parse "a prompt:1.5|other prompt" strings or (text, weight) pairs into
        # {'prompts': [...], 'weights': tensor([...])}.
        if not prompts:
            return []
        lowerCamelCase__ : int =[]
        lowerCamelCase__ : Dict =[]
        if isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
            lowerCamelCase__ : int =[prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(lowerCAmelCase_, (tuple, list) ):
                lowerCamelCase__ : Tuple =prompt[0]
                lowerCamelCase__ : Optional[Any] =float(prompt[1] )
            elif ":" in prompt:
                lowerCamelCase__ , lowerCamelCase__ : List[str] =prompt.split(''':''' )
                lowerCamelCase__ : List[str] =float(lowerCAmelCase_ )
            else:
                lowerCamelCase__ : List[str] =prompt
                lowerCamelCase__ : Optional[Any] =1.0
            processed_prompts.append(lowerCAmelCase_ )
            weights.append(lowerCAmelCase_ )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(lowerCAmelCase_, device=self.device ),
        }
    def snake_case ( self : Optional[int], lowerCamelCase : Dict, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=True, lowerCamelCase : List[str]=False, lowerCamelCase : Tuple=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Union[str, Any]=None, )-> str:
        # Entry point: seed the latent (from an image or noise), process prompts,
        # then run the CLIP-guided optimization, saving/showing frames as asked.
        if image_path:
            lowerCamelCase__ : Tuple =self._get_latent(lowerCAmelCase_ )
        else:
            lowerCamelCase__ : Optional[Any] =torch.randn(self.latent_dim, device=self.device )
        if self.log:
            self._init_logging(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
        assert pos_prompts, "You must provide at least one positive prompt."
        lowerCamelCase__ : Any =self.process_prompts(lowerCAmelCase_ )
        lowerCamelCase__ : Union[str, Any] =self.process_prompts(lowerCAmelCase_ )
        if save_final and save_path is None:
            lowerCamelCase__ : Union[str, Any] =os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(lowerCAmelCase_ ):
                os.makedirs(lowerCAmelCase_ )
            else:
                lowerCamelCase__ : Optional[Any] =save_path + '''_''' + get_timestamp()
                os.makedirs(lowerCAmelCase_ )
            lowerCamelCase__ : Optional[int] =save_path
        lowerCamelCase__ : Tuple =self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(lowerCAmelCase_ ) )
        lowerCamelCase__ : Any =loop_post_process(lowerCAmelCase_ )
        for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) ):
            if show_intermediate:
                show_pil(lowerCAmelCase_ )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}.png''' ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(lowerCAmelCase_ )} )
        if show_final:
            show_pil(lowerCAmelCase_ )
        if save_final:
            transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}_final.png''' ) )
| 719 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''Unit tests for the slow/fast MBart tokenizers on a SentencePiece fixture.

    NOTE(review): mangled source -- ``lowerCAmelCase_``/``lowerCamelCase`` are
    unbound and assignment targets were renamed, so several locals
    (``tokenizer``, ``tokenizer_r``, ``tmpdirname2`` ...) are read but never
    assigned.  Code kept byte-identical; only comments were added.
    '''
    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True
    def snake_case ( self : Tuple )-> Union[str, Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    def snake_case ( self : Dict )-> Union[str, Any]:
        # Full tokenize / ids / back-to-tokens round trip, including unknown pieces.
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )
    def snake_case ( self : Tuple )-> List[Any]:
        # Verify slow and fast tokenizers save the same files and reload
        # consistently, in all three save formats.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Integration tests for the pretrained facebook/mbart-large-en-ro tokenizer.'''
    # Checkpoint under test.
    _a = 'facebook/mbart-large-en-ro'
    # English source sentences.
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    # Romanian reference translations.
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # Expected ids of src_text[0], ending with EOS (2) and the en_XX code.
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        # One-time setup: load the en_XX -> ro_RO tokenizer for the whole class.
        # NOTE(review): targets are mangled; the original presumably set
        # cls.tokenizer and cls.pad_token_id = 1.  Code kept byte-identical.
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls
    def snake_case ( self : Optional[Any] )-> List[str]:
        # Language-code tokens must map to their fixed fairseq ids.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
    def snake_case ( self : Optional[int] )-> List[Any]:
        # Encoding src_text[0] must reproduce the pinned expected token ids.
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> str:
        # Decoding with/without the leading RO language code must agree once
        # special tokens are skipped, and EOS must not leak into the text.
        # NOTE(review): `lowerCamelCase` args are unbound (mangled source).
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
    def snake_case ( self : Tuple )-> int:
        # Truncation to max_length must keep EOS (2) and the language code last.
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
def snake_case ( self : int )-> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : int =tempfile.mkdtemp()
lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
@require_torch
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case ( self : Optional[Any] )-> Any:
lowerCamelCase__ : str =self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case ( self : List[Any] )-> Dict:
lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
lowerCamelCase__ : Tuple =self.tokenizer(
text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def snake_case ( self : Optional[int] )-> List[Any]:
lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
'''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase ), {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
}, )
| 625 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

# Key-renaming tables: original Encodec state-dict keys (left) -> HF EncodecModel keys (right).
# A "*" stands for a layer index and is substituted during conversion.
# NOTE(review): the mangled source bound every table to `_lowercase`, leaving the names
# actually referenced below (logger, MAPPING_QUANTIZER, MAPPING_24K, ...) undefined.
MAPPING_QUANTIZER = {
    'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
    'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
    'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
    'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
    'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
    'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
    'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
    'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
    'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
    'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
    'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
    'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
    'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
    'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
    'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
    'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
    'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
    'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
    'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
    'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
    'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
    'encoder.model.13.lstm': 'encoder.layers.13.lstm',
    'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
# Extra norm layers that only exist in the 48 kHz (time_group_norm) variant.
MAPPING_ENCODER_48K = {
    'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
    'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
    'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
    'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
    'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
    'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
    'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
    'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
    'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
    'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
    'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
    'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
    'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
    'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
    'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
    'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
    'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
    'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
    'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
    'decoder.model.1.lstm': 'decoder.layers.1.lstm',
    'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
    'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
    'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
    'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
    'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
    'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
    'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
    'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
    'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
    'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
    'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
    'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
    'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
    'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
    'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
    'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
    'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
    'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
    'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
    'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
    'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
    'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
    'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
    'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
    'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
    'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
    'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
    'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
    'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
    'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
    'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
    'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
    'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
    'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
    'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
# Combined tables per checkpoint family.
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the parameter of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the resolved module to overwrite (e.g.
    "weight_g", "bias_ih_l0"); None means the pointer itself is the tensor.
    Raises ValueError if shapes disagree.

    NOTE(review): the mangled source had duplicate parameter names (a SyntaxError)
    and assigned every branch to a dead local instead of the module attribute; the
    name `set_recursively` is taken from the call site in recursively_load_weights.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True if state-dict key `name` matches any pattern in `ignore_keys`.

    Pattern forms: "prefix.*" (prefix match), "prefix.*.suffix" (both substrings
    present), or a plain substring. Fix: the mangled source had duplicate parameter
    names and never bound `prefix`/`suffix` from the split.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Rename keys of an original Encodec state dict via MAPPING_* and load them into `hf_model`.

    Fixes over the mangled source: duplicate parameter names (SyntaxError), the
    always-true condition `model_name == "encodec_24khz" or "encodec_32khz"` (which
    would also route 48 kHz checkpoints to the 24K mapping), and dead-local
    assignments for `mapping`, `prefix`/`suffix`, `layer_index` and `weight_type`.
    """
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # Layer index sits just before the matched suffix in the original key.
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                # Decide which tensor of the target module this entry fills.
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original Encodec checkpoint to an HF EncodecModel + feature extractor.

    Fixes over the mangled source: duplicate parameter names (SyntaxError) and
    config/model/extractor bindings lost to dead locals (the 32/48 kHz config
    overrides were assigned to a throwaway variable instead of `config` attributes).
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point. Fix: the mangled source bound the parser and parsed args to
    # `_lowercase` while reading the undefined names `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 720 |
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters, leaving shorter words intact.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # Fix: the function was defined under a mangled name while the script (and the
    # doctest intent) call `reverse_long_words`; also dropped a redundant
    # ''.join(word[::-1]) — slicing a str already yields a str.
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 0 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE(SchedulerCommonTest):
    """Tests for CMStochasticIterativeScheduler.

    NOTE(review): reconstructed from a mangled source — the base class was the
    undefined `lowerCAmelCase_` (SchedulerCommonTest is imported above), every
    method was named `snake_case` (so later defs shadowed earlier ones), locals
    were read under undefined names, and both class attributes were bound to `_a`
    even though the bodies read `self.scheduler_classes`. Method names below are
    restored to descriptive ones; confirm against the upstream test file.
    """

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        """step() preserves the sample shape at different timesteps."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        """Single-step denoising loop reproduces reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3

    def test_full_loop_no_noise_multistep(self):
        """Custom two-timestep loop reproduces reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3

    def test_custom_timesteps_increasing_order(self):
        """Non-descending custom timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_with_num_inference_steps(self):
        """Passing both num_inference_steps and timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        """Timesteps >= num_train_timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 721 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are also an nth power.

    Counts pairs (base, power) with 1 <= base < max_base and 1 <= power < max_power
    such that base**power has exactly `power` digits. Fix: the mangled source used
    the same name for both parameters (a SyntaxError) and the __main__ guard called
    the undefined name `solution`.

    >>> solution()
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 625 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Donut-style processor wrapping an auto image processor and an auto tokenizer.

    NOTE(review): reconstructed from a mangled source that did not compile — several
    methods repeated the same parameter name (SyntaxError), the base class was the
    undefined `__a` (ProcessorMixin is imported above), and the sentinel `A__` stood
    in for `None`/argument names throughout. The method name `tokenajson` is kept
    because the recursive call sites in the original body use it.
    """

    # ProcessorMixin contract: attribute names and their auto classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        # Accept the deprecated argument as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Route inputs to the image processor and/or tokenizer; attach labels when both given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # NOTE(review): the mangled source dropped the assignment target here;
            # restoring the upstream `labels` key — confirm against callers.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily make __call__ forward everything to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a Donut `<s_key>...</s_key>` token sequence into a (nested) dict/list."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated tag: drop it and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case__(__lowerCamelCase):
    """Return the argument unchanged if it is already iterable, else duplicate it into a pair."""
    if not isinstance(__lowerCamelCase, collections.abc.Iterable):
        return (__lowerCamelCase, __lowerCamelCase)
    return __lowerCamelCase
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
pass
def snake_case ( self : List[str] )-> List[str]:
pass
def snake_case ( self : Optional[Any] )-> str:
pass
def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : int =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : List[str] =after_output[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[str] =model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
lowerCamelCase__ : int =output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase__ : int =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase__ : List[Any] =output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
lowerCamelCase__ : Any =inputs_dict
lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Union[str, Any]:
lowerCamelCase__ : Any =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : int =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def snake_case ( self : str )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def snake_case ( self : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
lowerCamelCase__ : Tuple =config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[str] =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[Any] =after_outputs[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """ViT vision tower + BERT text tower variant of the dual-encoder tests.

    NOTE(review): obfuscation left all three methods named `snake_case`, so
    only the last definition survives at class creation; the originals were
    presumably get_pretrained_model_and_inputs / get_vision_text_model /
    prepare_config_and_inputs — confirm before renaming.
    """

    def snake_case(self):
        # Load a tiny pretrained ViT+BERT pair and build matching dummy inputs.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=True, text_from_pt=True, )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case(self, vision_config, text_config):
        # NOTE(review): original declared both params as `lowerCamelCase` (SyntaxError).
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def snake_case(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """CLIP vision tower + BERT text tower variant of the dual-encoder tests.

    NOTE(review): all three methods share the mangled name `snake_case`, so only
    the last survives at class creation — original names need restoring.
    """

    def snake_case(self):
        # Load a tiny pretrained CLIP+BERT pair and build matching dummy inputs.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=True, text_from_pt=True, )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case(self, vision_config, text_config):
        # NOTE(review): original declared both params as `lowerCamelCase` (SyntaxError).
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def snake_case(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint."""

    @slow
    def snake_case(self):
        # NOTE(review): the original referenced `lowerCamelCase` inside a
        # no-argument method (NameError); `padding=True` and proper local
        # bindings restored.
        model = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''], images=image, padding=True, return_tensors='''np''')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1E-3))
| 625 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps a pip-style comparison token to the corresponding `operator` function.
# NOTE(review): the name was mangled to `_lowercase`; the version-checking
# functions below originally referred to it as `ops` — confirm.
_lowercase : str = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def snake_case__(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare an installed version against a wanted version with operator `op`.

    Raises ValueError when either version is unknown, ImportError when the
    comparison fails.

    NOTE(review): parameter names restored — the original declared every
    parameter as `__lowerCamelCase` (a SyntaxError) while the bodies referenced
    the names used here.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            f''' reinstalling {pkg}.''' )
    # `_lowercase` is this module's operator table (name mangled from `ops`).
    if not _lowercase[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def snake_case__(requirement: str, hint: Optional[str] = None):
    """Check that the pip-style `requirement` is satisfied by what is installed.

    Raises ValueError on a malformed requirement, PackageNotFoundError when the
    distribution is missing, and ImportError when the version does not match.

    NOTE(review): both parameters were declared as `__lowerCamelCase` (a
    SyntaxError); names restored from the body's references.
    """
    hint = f'''\n{hint}''' if hint is not None else ''''''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', requirement)
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                f''' got {requirement}''' )
        pkg, want_full = match[0]
        want_range = want_full.split(''',''')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''', w)
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    f''' but got {requirement}''' )
            op, want_ver = match[0]
            wanted[op] = want_ver
            # `_lowercase` is this module's operator table (mangled from `ops`).
            if op not in _lowercase:
                raise ValueError(f'''{requirement}: need one of {list(_lowercase.keys() )}, but got {op}''')
    # special case: compare against the running interpreter version
    if pkg == "python":
        got_ver = '''.'''.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            # NOTE(review): the comparator's unmangled name — confirm against the original module.
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def snake_case__(requirement):
    """Check `requirement` with a hint pointing at a dev install of transformers.

    NOTE(review): the original forwarded the undefined name
    `_SCREAMING_SNAKE_CASE`; arguments restored. `require_version` is the
    unmangled name of the checker defined above — confirm.
    """
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement, hint)
| 701 |
"""simple docstring"""
def snake_case__(weights: list, values: list, number_of_items: int, max_weight: int, index: int):
    """Naive recursive 0/1 knapsack: maximum value achievable from item `index`
    onward with remaining capacity `max_weight`.

    NOTE(review): the original declared five parameters all named
    `__lowerCamelCase` (a SyntaxError) and recursed through the undefined name
    `knapsack`; bindings restored.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = snake_case__(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + snake_case__(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def snake_case__(_outputs):
    """Element-wise logistic sigmoid of the model logits.

    NOTE(review): the original's parameter was mangled while the body read the
    undefined name `_outputs`; binding restored.
    """
    return 1.0 / (1.0 + np.exp(-_outputs))
def snake_case__(_outputs):
    """Numerically stable softmax over the last axis of the model logits.

    NOTE(review): the original read the undefined names `_outputs`/`maxes`/
    `shifted_exp` and passed the undefined `lowercase__` as `keepdims`;
    restored to `keepdims=True` (required so broadcasting lines up).
    """
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class __SCREAMING_SNAKE_CASE ( __snake_case ):
    '''simple docstring'''
    # Enum-style set of post-processing functions applied to classifier logits.
    # NOTE(review): obfuscation renamed every member to `_a`, so only the last
    # binding ("none") survives at class-creation time; the original members
    # were presumably SIGMOID / SOFTMAX / NONE — confirm against transformers'
    # ClassificationFunction before relying on this class.
    _a = 'sigmoid'
    _a = 'softmax'
    _a = 'none'
@add_end_docstrings(
    __snake_case , r'\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n' , )
class __SCREAMING_SNAKE_CASE ( __snake_case ):
    """Sequence-classification pipeline: tokenize text, run the model, and map
    logits to labeled scores.

    NOTE(review): obfuscation left the four non-dunder methods all named
    `snake_case` (so only the last survives at class creation, and the base
    Pipeline cannot find _sanitize_parameters/preprocess/_forward/postprocess),
    and collapsed the two class attributes to `_a`. Method/attribute names are
    kept as-is here; only the duplicate-parameter SyntaxErrors and the
    undefined `__UpperCamelCase` references are repaired — confirm the real
    names against transformers' TextClassificationPipeline.
    """

    _a = False
    _a = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Restrict to sequence-classification model heads for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    def snake_case(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # _sanitize_parameters: split kwargs into preprocess/forward/postprocess params.
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, '''return_all_scores''') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''', UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = '''top_k''' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def snake_case(self, inputs, **tokenizer_kwargs):
        # preprocess: tokenize raw text / dict / text-pair inputs.
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def snake_case(self, model_inputs):
        # _forward: run the underlying model.
        return self.model(**model_inputs)

    def snake_case(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # postprocess: choose the activation, then map scores to labels.
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, '''function_to_apply''') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['''logits'''][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
        # NOTE(review): `idalabel` in the original is the mangled form of the
        # standard config attribute `id2label` — confirm.
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 702 |
"""simple docstring"""
# Pinned/compatible dependency specifiers, keyed by distribution name.
# NOTE(review): in transformers this table is auto-generated from setup.py
# (normally named `deps`); edit setup.py rather than this mapping — confirm.
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class __SCREAMING_SNAKE_CASE:
    """Polynomial over the reals; ``coefficients[i]`` multiplies ``x**i``.

    NOTE(review): the original was unusable — ``__init__`` declared two
    parameters with the same mangled name (a SyntaxError), attribute
    assignments went to a throwaway local, and methods constructed instances
    via the undefined global ``Polynomial``. The three colliding ``snake_case``
    methods are restored to their evident names evaluate / derivative /
    integral — confirm against the unmangled module.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''' )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        # Add coefficient-wise; the result has the larger degree.
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return type(self)(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return type(self)(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        # a - b == a + b * (-1)
        return self + polynomial_a * type(self)(0, [-1])

    def __neg__(self) -> Polynomial:
        return type(self)(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        # Schoolbook convolution of the coefficient vectors.
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return type(self)(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        """Evaluate the polynomial at `substitution` via direct powers."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        # Render highest-degree term first, skipping zero coefficients.
        polynomial = ''''''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative (degree reduced by one)."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return type(self)(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        """Return the antiderivative with integration constant `constant`."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return type(self)(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, type(self)):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
| 703 |
"""simple docstring"""
def snake_case__(numbers: list[int]):
    """Maximum product over all contiguous (non-empty) subarrays of `numbers`.

    Returns 0 for an empty input; raises ValueError for non-integer content.

    NOTE(review): the original body read the undefined names `numbers`,
    `number`, `max_till_now`, `min_till_now`, `max_prod` while every
    assignment went to a throwaway local; bindings restored.
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')
    # Track both the max and min products ending at the current index,
    # because a negative number can flip the smallest product into the largest.
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 625 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Absolute path of the repo root (three directory levels above this test file).
# NOTE(review): mangled to `_lowercase`; the `sys.path.append` below still
# refers to it as `git_repo_path` — confirm against the original module.
_lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies  # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_lowercase : Optional[int] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[str] )-> Optional[Any]:
lowerCamelCase__ : Dict =tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, '''models/bert/''' ) )
lowerCamelCase__ : str =self.transformer_dir
shutil.copy(
os.path.join(lowercase__, '''src/transformers/models/bert/modeling_bert.py''' ), os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py''' ), )
    def snake_case ( self : Tuple )-> List[str]:
        # Tear down: remove the tree referenced by self.transformer_dir.
        # NOTE(review): the assignment below goes to a throwaway local and has
        # no effect; before obfuscation it was presumably
        # `self.transformer_dir = "src/transformers"` — confirm intent before
        # fixing, since that would make rmtree delete the real source tree.
        lowerCamelCase__ : Optional[Any] ="""src/transformers"""
        shutil.rmtree(self.transformer_dir )
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : List[str]=None )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowerCamelCase__ : List[Any] =comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowerCamelCase__ : str =black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119 )
lowerCamelCase__ : Optional[int] =black.format_str(lowercase__, mode=lowercase__ )
lowerCamelCase__ : Any =os.path.join(self.transformer_dir, '''new_code.py''' )
with open(lowercase__, '''w''', newline='''\n''' ) as f:
f.write(lowercase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name, overwrite=lowercase__ )
with open(lowercase__, '''r''' ) as f:
self.assertTrue(f.read(), lowercase__ )
def snake_case ( self : List[Any] )-> int:
lowerCamelCase__ : Optional[Any] =check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowercase__, lowercase__ )
def snake_case ( self : int )-> List[Any]:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', REFERENCE_CODE + '''\n''', )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', lowercase__, )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', re.sub('''Bert''', '''TestModel''', lowercase__ ), )
# Copy consistency with a really long name
lowerCamelCase__ : List[Any] ="""TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''', F'''{long_class_name}LMPredictionHead''', re.sub('''Bert''', lowercase__, lowercase__ ), )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', lowercase__, overwrite_result=re.sub('''Bert''', '''TestModel''', lowercase__ ), )
def snake_case ( self : Any )-> int:
lowerCamelCase__ : List[Any] =check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
lowerCamelCase__ : List[Any] =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
lowerCamelCase__ : Optional[int] =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowerCamelCase__ : Tuple =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
lowerCamelCase__ : Any =check_copies.convert_to_localized_md(
lowercase__, lowercase__, localized_readme['''format_model_list'''] )
self.assertFalse(lowercase__ )
self.assertEqual(lowercase__, lowercase__ )
lowerCamelCase__ : Tuple =check_copies.convert_to_localized_md(
lowercase__, lowercase__, localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowercase__ )
lowerCamelCase__ : str =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
lowerCamelCase__ : Union[str, Any] =(
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowerCamelCase__ : str =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowerCamelCase__ : Dict =check_copies.convert_to_localized_md(
lowercase__, lowercase__, localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(lowercase__, lowercase__ )
| 704 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of `FlaxControlNetModel.__call__`.

    Attributes:
        down_block_res_samples (`jnp.ndarray`):
            Tuple/list of per-down-block residual feature maps, already passed
            through the zero-initialized controlnet projection convolutions.
        mid_block_res_sample (`jnp.ndarray`):
            Residual feature map produced by the mid block projection.
    """

    # Field names match the keyword arguments used when this output is built at
    # the end of FlaxControlNetModel.__call__.
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embeds the conditioning image (e.g. canny edges, pose) into the latent space.

    A small conv network with silu activations and stride-2 downsampling steps,
    ending in a zero-initialized projection so the ControlNet starts as a no-op.

    Attributes:
        conditioning_embedding_channels (`int`): number of output channels.
        block_out_channels (`Tuple[int, ...]`): channel widths of the intermediate convs.
        dtype (`jnp.dtype`): parameter dtype.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            # Same-resolution conv at the current width ...
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            # ... followed by a stride-2 conv that widens and downsamples.
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero-initialized so the conditioning contributes nothing at step 0.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        """Return the embedded conditioning map (NHWC)."""
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax ControlNet: a copy of the UNet encoder + mid block whose residuals,
    passed through zero-initialized convolutions, condition a diffusion UNet.

    Attributes mirror the config parameters (registered via
    `@flax_register_to_config`); see the original diffusers
    `FlaxControlNetModel` for full parameter documentation.
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize and return the model parameters using dummy inputs."""
        # init input tensors (latent sample is NCHW; conditioning image is 8x larger)
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        # Broadcast scalar config values to one entry per down block.
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        # One zero-initialized 1x1 projection for the stem output ...
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        # ... then one per residual output of every down block.
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet and return the scaled residuals.

        Args:
            sample: noisy latents, NCHW.
            timesteps: scalar or 1-D array of diffusion timesteps.
            encoder_hidden_states: text-encoder hidden states.
            controlnet_cond: conditioning image, NCHW (3 channels).
            conditioning_scale: multiplier applied to all returned residuals.
            return_dict: return a `FlaxControlNetOutput` instead of a tuple.
            train: enables dropout (deterministic otherwise).
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (convert NCHW -> NHWC for Flax convolutions)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks (zero-initialized 1x1 projections)
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [s * conditioning_scale for s in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( nth_term : int | float | str , power : int | float | str ):
    """Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ...

    Each term is rendered as a string ("1", "1 / 4", "1 / 9", ...).

    Args:
        nth_term: number of terms to generate; an empty string yields [""].
        power: the exponent p applied to each denominator.

    Returns:
        list[str]: the formatted series terms (empty list when nth_term <= 0).
    """
    # Preserve the historical behaviour for an empty interactive input.
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        # The very first term is always "1"; later terms are 1 / (k ** p).
        series.append(f'''1 / {pow(temp + 1 , int(power) )}''' if series else '''1''')
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(snake_case__(nth_term, power))
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: maps submodule name -> public names it exports.
# Backend-specific entries are only added when the backend is installed.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


# Static type checkers see the real imports; at runtime the module is replaced
# by a _LazyModule that resolves names from _import_structure on first access.
if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Parameter-name suffixes whose values are *averaged* across tensor-parallel (TP)
# ranks when merging Megatron-DeepSpeed shards (these tensors are replicated on
# every rank, so the merge divides their sum by `pretraining_tp` below).
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

# Substrings identifying weights that are row-parallel in Megatron-DeepSpeed;
# their shards are concatenated along dim 1 instead of dim 0.
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def snake_case__ ( key , file ):
    """Map a Megatron-DeepSpeed parameter name to its transformers equivalent.

    Args:
        key: parameter name inside one checkpoint shard.
        file: shard file name (e.g. ``layer_04-model_00-model_states.pt``); its
            ``layer_NN`` component determines the transformer-block index.

    Returns:
        str: the renamed parameter key.
    """
    # Embedding / final-layernorm parameters have fixed names.
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: Megatron numbers layer files starting at 3,
    # while transformers blocks start at h.0, hence the shift.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def snake_case__ ( dtype ):
    """Return the per-element storage size of a torch dtype, in bytes.

    Args:
        dtype: a ``torch.dtype`` (e.g. ``torch.float32``).

    Returns:
        ``1/8`` (a float) for ``torch.bool``, otherwise the integer byte width
        parsed from the dtype's name (e.g. 4 for float32).

    Raises:
        ValueError: if the dtype name does not end with a bit width.
    """
    if dtype == torch.bool:
        # bools are counted as one bit each for size-estimation purposes
        return 1 / 8
    # "torch.float32" -> trailing digits "32" give the bit width
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str ):
    """Convert a Megatron-DeepSpeed BLOOM checkpoint to a transformers checkpoint.

    Merges tensor-parallel (TP) shards -- averaging replicated weights and
    concatenating row/column-parallel ones -- and writes either a sharded
    checkpoint with an index (`shard_model` branch) or a single state dict
    loaded into a `BloomModel`.

    NOTE(review): this function is mechanically mangled -- the `def` header
    repeats the parameter name `__lowerCamelCase` (a SyntaxError) and the body
    references names (`bloom_config_file`, `shard_model`, `tensors`, `temp`,
    `keys`, `config`, `model`, `index_dict`, `total_size`, `missing_keys`,
    `other_keys`, `pretraining_tp`, `pytorch_dump_folder_path`, ...) that are
    never bound by the visible assignments. Confirm against the original
    transformers `convert_bloom_original_checkpoint_to_pytorch.py`.
    """
    if bloom_config_file == "":
        lowerCamelCase__ : Tuple =BloomConfig()
    else:
        lowerCamelCase__ : Tuple =BloomConfig.from_json_file(__lowerCamelCase )
    if shard_model:
        # Sharded output: process one layer file at a time and emit
        # pytorch_model_XXXXX-of-YYYYY.bin files plus an index json.
        lowerCamelCase__ : List[str] =os.listdir(__lowerCamelCase )
        lowerCamelCase__ : List[str] =sorted(filter(lambda __lowerCamelCase : s.startswith('''layer''' ) and "model_00" in s , __lowerCamelCase ) )
        lowerCamelCase__ : Any ={'''weight_map''': {}, '''metadata''': {}}
        lowerCamelCase__ : Any =0
        lowerCamelCase__ : Union[str, Any] =None
        lowerCamelCase__ : Tuple =BloomConfig()
        for j, file in enumerate(__lowerCamelCase ):
            print('''Processing file: {}'''.format(__lowerCamelCase ) )
            lowerCamelCase__ : Any =None
            for i in range(__lowerCamelCase ):
                # load all TP files
                lowerCamelCase__ : Dict =file.replace('''model_00''' , f'''model_0{i}''' )
                lowerCamelCase__ : List[str] =torch.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                lowerCamelCase__ : List[str] =list(temp.keys() )
                for key in keys:
                    lowerCamelCase__ : Dict =temp.pop(__lowerCamelCase )
                if tensors is None:
                    lowerCamelCase__ : List[str] =temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            lowerCamelCase__ : int =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            lowerCamelCase__ : Tuple =torch.cat([tensors[key], temp[key]] , dim=__lowerCamelCase )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    lowerCamelCase__ : Tuple =tensors[key] / pretraining_tp
            torch.save(
                __lowerCamelCase , os.path.join(
                    __lowerCamelCase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(__lowerCamelCase ) ).zfill(5 ) ) , ) , )
            # Track per-key file placement and the running checkpoint size for the index.
            for key in tensors.keys():
                lowerCamelCase__ : Any =tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    lowerCamelCase__ : List[str] ='''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1 ).zfill(5 ) , str(len(__lowerCamelCase ) ).zfill(5 ) )
        # Write the config and the weight-map index next to the shards.
        lowerCamelCase__ : Dict =BloomConfig()
        lowerCamelCase__ : int =pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        lowerCamelCase__ : str =total_size
        with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(__lowerCamelCase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
            lowerCamelCase__ : Tuple =json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + '''\n'''
            f.write(__lowerCamelCase )
    else:
        # Single-file output: merge everything into one state dict and load it
        # into a BloomModel to validate the keys before saving.
        lowerCamelCase__ : Optional[int] =BloomModel(__lowerCamelCase )
        lowerCamelCase__ : List[str] =os.listdir(__lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =sorted(filter(lambda __lowerCamelCase : s.startswith('''layer''' ) and "model_00" in s , __lowerCamelCase ) )
        lowerCamelCase__ : Dict =None
        for i, file in enumerate(__lowerCamelCase ):
            lowerCamelCase__ : List[Any] =None
            for i in range(__lowerCamelCase ):
                # load all TP files
                lowerCamelCase__ : Optional[int] =file.replace('''model_00''' , f'''model_0{i}''' )
                lowerCamelCase__ : Dict =torch.load(os.path.join(__lowerCamelCase , __lowerCamelCase ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                lowerCamelCase__ : Union[str, Any] =list(temp.keys() )
                for key in keys:
                    lowerCamelCase__ : Any =temp.pop(__lowerCamelCase )
                if tensors is None:
                    lowerCamelCase__ : List[str] =temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            lowerCamelCase__ : Dict =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            lowerCamelCase__ : Optional[Any] =torch.cat([tensors[key], temp[key]] , dim=__lowerCamelCase )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    lowerCamelCase__ : List[str] =tensors[key] / pretraining_tp
            # Load incrementally; every key must eventually be covered by some file.
            lowerCamelCase__ : Dict =model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
            assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                lowerCamelCase__ : Optional[int] =set(other_keys.missing_keys )
            else:
                lowerCamelCase__ : Tuple =missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f'''The keys {missing_keys} are missing'''
        # Save pytorch-model
        os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
        lowerCamelCase__ : Optional[int] =pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        lowerCamelCase__ : Optional[int] =pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
        if config.torch_dtype is not None:
            lowerCamelCase__ : Union[str, Any] =model.to(config.torch_dtype )
        torch.save(model.state_dict() , __lowerCamelCase )
        print(f'''Save configuration file to {pytorch_config_dump_path}''' )
        with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    # Command-line entry point for the Megatron-LM BLOOM -> transformers conversion.
    # NOTE(review): the parser is assigned to `_lowercase` but used as `parser`,
    # and parse_args() is assigned to `_lowercase` but used as `args` -- this
    # looks mechanically renamed; confirm against the original script.
    _lowercase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    _lowercase : List[Any] = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 706 |
"""simple docstring"""
import os
def snake_case__ ( ):
    """Project Euler 22: sum of all "name scores" in p022_names.txt.

    A name's score is the sum of its letters' alphabetical values (A=1, ...)
    multiplied by the name's 1-based position in the sorted list.

    Returns:
        int: the total of all name scores.
    """
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('''"''' , '''''' ).split(''',''' )

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            # ord('A') == 65, so subtracting 64 maps A->1, B->2, ...
            # (the puzzle input is all-uppercase)
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        # reset the per-name accumulator before the next name
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(snake_case__())
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE:
    """A minimal dense matrix supporting indexing, +, -, unary -, scalar and
    matrix multiplication, transposition, and the Sherman-Morrison rank-1
    inverse update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a row x column matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: width of the widest element, so that every
        # cell is right-aligned to the same width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff ``loc`` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add element-wise.
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Apply the Sherman-Morrison formula.

        Assuming this matrix already holds A^(-1), return (A + u v^T)^(-1),
        or None when the update is singular (denominator is zero).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate the scalar denominator 1 + v^T A^(-1) u.
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backwards-compatible alias: the method bodies/annotations and the demo code
# in this file refer to the class as ``Matrix``.
Matrix = __SCREAMING_SNAKE_CASE
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update starting from a 3x3 identity."""
        # a^(-1): the identity matrix is its own inverse.
        ainv = __SCREAMING_SNAKE_CASE(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v: column vectors of the rank-1 update a + u v^T.
        u = __SCREAMING_SNAKE_CASE(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = __SCREAMING_SNAKE_CASE(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test2()
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and optional children.

    Named ``Node`` because the sibling tree class below annotates its
    constructor argument as ``Node``; the previous name collided with that
    sibling class and was shadowed by it.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class __SCREAMING_SNAKE_CASE:
    """Depth-first traversal over a binary tree.

    Iterating an instance yields a single integer: the sum of all node
    values in the tree.
    """

    def __init__(self, tree: Node) -> None:
        # Root node of the tree to traverse.
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of ``node``'s value and all its descendants (0 for None)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , unittest.TestCase ):
    """Tokenizer tests for Longformer, exercising both the slow (Python) and
    fast (Rust-backed) implementations against a tiny on-disk BPE fixture.

    NOTE(review): many names used below (``UpperCAmelCase_``, ``_snake_case``,
    ``kwargs``, ``tokenizer``, ``tokens``, ...) are unresolved in this file and
    appear machine-mangled; verify every method against the upstream
    Longformer tokenizer test module before relying on this code.
    """
    # NOTE(review): all four class attributes are bound to the same name
    # ``_a``, so only the last assignment survives — confirm the intended
    # attribute names (tokenizer classes + mixin flags) upstream.
    _a = LongformerTokenizer
    _a = True
    _a = LongformerTokenizerFast
    _a = True

    # setUp: writes a minimal BPE vocab + merges fixture into self.tmpdirname.
    def snake_case ( self : int )-> str:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase__ : Dict =[
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowerCamelCase__ : Tuple =dict(zip(_snake_case, range(len(_snake_case ) ) ) )
        lowerCamelCase__ : Tuple =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCamelCase__ : Any ={'''unk_token''': '''<unk>'''}
        lowerCamelCase__ : Tuple =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase__ : Union[str, Any] =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_snake_case ) + '''\n''' )
        with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_snake_case ) )

    # Returns a slow tokenizer loaded from the fixture directory.
    def snake_case ( self : int, **lowerCamelCase : Union[str, Any] )-> Tuple:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **_snake_case )

    # Returns a fast (Rust) tokenizer loaded from the fixture directory.
    def snake_case ( self : Optional[int], **lowerCamelCase : List[Any] )-> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **_snake_case )

    # Input/expected-output text pair consumed by the shared tokenizer mixin.
    def snake_case ( self : Any, lowerCamelCase : Optional[Any] )-> List[Any]:
        lowerCamelCase__ : int ='''lower newer'''
        lowerCamelCase__ : List[str] ='''lower newer'''
        return input_text, output_text

    # Tokenizes "lower newer" with the fixture vocab and checks tokens and ids.
    def snake_case ( self : Any )-> Dict:
        lowerCamelCase__ : Tuple =self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
        lowerCamelCase__ : List[str] ='''lower newer'''
        lowerCamelCase__ : Any =['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        lowerCamelCase__ : int =tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
        self.assertListEqual(_snake_case, _snake_case )
        lowerCamelCase__ : List[Any] =tokens + [tokenizer.unk_token]
        lowerCamelCase__ : Optional[int] =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ), _snake_case )

    # Checks fixed reference encodings for two known sentences.
    def snake_case ( self : List[str] )-> Tuple:
        lowerCamelCase__ : str =self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''', add_special_tokens=_snake_case ), [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''', add_special_tokens=_snake_case ), [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2], )

    # Verifies build_inputs_with_special_tokens matches encode() output for
    # single sentences and sentence pairs (slow network test).
    @slow
    def snake_case ( self : List[Any] )-> str:
        lowerCamelCase__ : str =self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        lowerCamelCase__ : str =tokenizer.encode('''sequence builders''', add_special_tokens=_snake_case )
        lowerCamelCase__ : Any =tokenizer.encode('''multi-sequence build''', add_special_tokens=_snake_case )
        lowerCamelCase__ : List[str] =tokenizer.encode(
            '''sequence builders''', add_special_tokens=_snake_case, add_prefix_space=_snake_case )
        lowerCamelCase__ : str =tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=_snake_case, add_prefix_space=_snake_case )
        lowerCamelCase__ : int =tokenizer.build_inputs_with_special_tokens(_snake_case )
        lowerCamelCase__ : int =tokenizer.build_inputs_with_special_tokens(_snake_case, _snake_case )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    # Exercises prefix-space and mask-token space handling during encoding.
    def snake_case ( self : List[str] )-> int:
        lowerCamelCase__ : List[str] =self.get_tokenizer()
        lowerCamelCase__ : str ='''Encode this sequence.'''
        lowerCamelCase__ : List[Any] =tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
        # Testing encoder arguments
        lowerCamelCase__ : Dict =tokenizer.encode(_snake_case, add_special_tokens=_snake_case, add_prefix_space=_snake_case )
        lowerCamelCase__ : Tuple =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(_snake_case, _snake_case )
        lowerCamelCase__ : Tuple =tokenizer.encode(_snake_case, add_special_tokens=_snake_case, add_prefix_space=_snake_case )
        lowerCamelCase__ : int =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(_snake_case, _snake_case )
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        lowerCamelCase__ : List[str] =tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
        lowerCamelCase__ : Tuple =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(_snake_case, _snake_case )
        # Testing spaces after special tokens
        lowerCamelCase__ : Optional[int] ='''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(_snake_case, lstrip=_snake_case, rstrip=_snake_case )} ) # mask token has a left space
        lowerCamelCase__ : Tuple =tokenizer.convert_tokens_to_ids(_snake_case )
        lowerCamelCase__ : Tuple ='''Encode <mask> sequence'''
        lowerCamelCase__ : Optional[int] ='''Encode <mask>sequence'''
        lowerCamelCase__ : Optional[Any] =tokenizer.encode(_snake_case )
        lowerCamelCase__ : Optional[Any] =encoded.index(_snake_case )
        lowerCamelCase__ : int =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(_snake_case, _snake_case )
        lowerCamelCase__ : Any =tokenizer.encode(_snake_case )
        lowerCamelCase__ : str =encoded.index(_snake_case )
        lowerCamelCase__ : int =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(_snake_case, _snake_case )

    # NOTE(review): intentionally empty — presumably disables an inherited
    # mixin test for this tokenizer; confirm upstream which test this is.
    def snake_case ( self : Dict )-> Tuple:
        pass

    # Checks token_type_ids/attention_mask parity between the Python and Rust
    # tokenizers, and the known mask-space divergence between them.
    def snake_case ( self : Union[str, Any] )-> Dict:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : List[Any] =self.rust_tokenizer_class.from_pretrained(_snake_case, **_snake_case )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(_snake_case, **_snake_case )
                lowerCamelCase__ : Optional[Any] ='''A, <mask> AllenNLP sentence.'''
                lowerCamelCase__ : int =tokenizer_r.encode_plus(_snake_case, add_special_tokens=_snake_case, return_token_type_ids=_snake_case )
                lowerCamelCase__ : int =tokenizer_p.encode_plus(_snake_case, add_special_tokens=_snake_case, return_token_type_ids=_snake_case )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ), sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ), sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ), )
                lowerCamelCase__ : Dict =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                lowerCamelCase__ : int =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _snake_case, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    _snake_case, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )

    # Round-trips add_prefix_space/trim_offsets through the serialized
    # pre-tokenizer and post-processor state of the fast backend.
    def snake_case ( self : List[Any] )-> Any:
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
            lowerCamelCase__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
            lowerCamelCase__ : Any =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            lowerCamelCase__ : Optional[int] =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''], _snake_case )
            self.assertEqual(post_processor_state['''add_prefix_space'''], _snake_case )
            self.assertEqual(post_processor_state['''trim_offsets'''], _snake_case )

    # Offset-mapping behaviour for every add_prefix_space/trim_offsets
    # combination, with and without a leading space in the input text.
    def snake_case ( self : Optional[Any] )-> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                lowerCamelCase__ : Union[str, Any] =F'''{text_of_1_token} {text_of_1_token}'''
                lowerCamelCase__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : List[str] =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (0, len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )), )
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : Optional[int] =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (0, len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )), )
                lowerCamelCase__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : List[str] =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (0, len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )), )
                lowerCamelCase__ : List[Any] =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : Optional[int] =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (0, len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )), )
                lowerCamelCase__ : List[str] =F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                lowerCamelCase__ : Tuple =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : Any =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )), )
                lowerCamelCase__ : Optional[int] =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : List[str] =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )), )
                lowerCamelCase__ : Tuple =self.rust_tokenizer_class.from_pretrained(
                    _snake_case, use_fast=_snake_case, add_prefix_space=_snake_case, trim_offsets=_snake_case )
                lowerCamelCase__ : List[str] =tokenizer_r(_snake_case, return_offsets_mapping=_snake_case, add_special_tokens=_snake_case )
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(_snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )), )
| 708 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Module-level logger; every function below logs through the name ``logger``.
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, clearing stale checkpoint files first.

    Args:
        model: object exposing ``save_pretrained`` (e.g. a transformers model).
        dirpath: output directory; created when it does not exist, otherwise
            any existing ``config.json``/``pytorch_model.bin`` is removed so
            the fresh checkpoint fully replaces the old one.
    """
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim.

    Args:
        p: tensor of probabilities (or of values to square when ``unlogit``).
        unlogit: when True, square ``p`` element-wise before the entropy.

    Returns:
        ``-sum(p * log(p))`` over the last dimension (entries with p == 0
        contribute 0 instead of NaN).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) is defined as 0 here
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row through the module logger.

    Floats are rendered with 5 decimals; long (integer) tensors as plain ints.
    The header row numbers the columns starting at 1.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over ``eval_dataloader`` and accumulate per-head statistics.

    Importance is |d loss / d head_mask| accumulated over the dataset; entropy
    is the attention-distribution entropy per layer/head.

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                # unlogit=True squares the attention weights before the entropy
                # (see entropy()) — TODO(review): confirm against upstream.
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank heads from most to least important (descending importance order).
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the LM score (1/loss)
    drops below ``args.masking_threshold`` times the unmasked score.

    The final mask is saved to ``<output_dir>/head_mask.npy`` and returned.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune (remove the weights of) the heads zeroed in ``head_mask``
    and compare score and speed before/after pruning, following Michel et al.
    (http://arxiv.org/abs/1905.10650). The pruned model is saved to
    ``args.output_dir``.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Map layer index -> list of head indices whose mask entry is 0.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapsed a single-head list to a scalar; re-wrap it.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
lowerCamelCase__ : List[Any] =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowerCamelCase__ : Dict =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank )
lowerCamelCase__ : Any =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel(
__lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase )
elif args.n_gpu > 1:
lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Prepare dataset
lowerCamelCase__ : Union[str, Any] =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),)
lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase )
lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase )
lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCamelCase__ : Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Script entry point for the head-masking/pruning experiment defined above.
# NOTE(review): ``main`` is not defined in the visible portion of this file —
# confirm the entry-point name matches the function it is meant to invoke.
if __name__ == "__main__":
    main()
| 625 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , unittest.TestCase ):
    """Fast, CPU-sized unit tests for ``ShapEPipeline`` built from tiny
    randomly-initialised components (CLIP text encoder, prior transformer,
    renderer).

    NOTE(review): this file shows signs of automated identifier renaming:
    every class attribute is bound to the same name ``_a`` (each assignment
    clobbers the previous one), every local is bound to ``lowerCamelCase__``
    while later statements read differently-named locals (``tokenizer``,
    ``prior``, ``pipe``, …), and many call arguments reference the
    module-level ``_lowerCAmelCase``. The original intent (pipeline test
    mixin attributes such as ``pipeline_class``/``params``/``batch_params``)
    should be restored before these tests can run — confirm against the
    upstream diffusers test suite.
    """

    # Class attributes consumed by the pipeline tester mixin (see NOTE above
    # about the repeated ``_a`` binding).
    _a = ShapEPipeline
    _a = ['prompt']
    _a = ['prompt']
    _a = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    _a = False

    @property
    def snake_case ( self : Optional[Any] )-> Dict:
        # Hidden size of the tiny text embedder.
        return 32

    @property
    def snake_case ( self : Any )-> Dict:
        # Time-embedding input dimension of the prior.
        return 32

    @property
    def snake_case ( self : int )-> str:
        # Time-embedding projection dimension (4x the input dimension).
        return self.time_input_dim * 4

    @property
    def snake_case ( self : Optional[int] )-> Any:
        # Hidden dimension of the tiny renderer.
        return 8

    @property
    def snake_case ( self : str )-> Any:
        # Tiny CLIP tokenizer fixture pulled from the HF testing hub.
        # NOTE(review): assigns to ``lowerCamelCase__`` but returns
        # ``tokenizer`` — renaming artifact, see class docstring.
        lowerCamelCase__ : str =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def snake_case ( self : str )-> List[Any]:
        # Deterministic tiny CLIP text encoder (seeded for reproducibility).
        torch.manual_seed(0 )
        lowerCamelCase__ : List[str] =CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(_lowerCAmelCase )

    @property
    def snake_case ( self : Any )-> str:
        # Deterministic tiny PriorTransformer fixture.
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] ={
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        lowerCamelCase__ : str =PriorTransformer(**_lowerCAmelCase )
        return model

    @property
    def snake_case ( self : Optional[Any] )-> Optional[int]:
        # Deterministic tiny ShapERenderer fixture.
        torch.manual_seed(0 )
        lowerCamelCase__ : Any ={
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        lowerCamelCase__ : int =ShapERenderer(**_lowerCAmelCase )
        return model

    def snake_case ( self : List[str] )-> Optional[Any]:
        # Assemble the full component dict expected by ShapEPipeline.
        lowerCamelCase__ : str =self.dummy_prior
        lowerCamelCase__ : str =self.dummy_text_encoder
        lowerCamelCase__ : Dict =self.dummy_tokenizer
        lowerCamelCase__ : str =self.dummy_renderer
        lowerCamelCase__ : Optional[Any] =HeunDiscreteScheduler(
            beta_schedule='''exp''', num_train_timesteps=1024, prediction_type='''sample''', use_karras_sigmas=_lowerCAmelCase, clip_sample=_lowerCAmelCase, clip_sample_range=1.0, )
        lowerCamelCase__ : Union[str, Any] ={
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components

    def snake_case ( self : Dict, lowerCamelCase : Optional[int], lowerCamelCase : Any=0 )-> List[Any]:
        # Build pipeline call kwargs with a device-appropriate seeded generator.
        # NOTE(review): duplicate parameter names ``lowerCamelCase`` are a
        # renaming artifact (a device and a seed were presumably intended).
        if str(_lowerCAmelCase ).startswith('''mps''' ):
            lowerCamelCase__ : Optional[int] =torch.manual_seed(_lowerCAmelCase )
        else:
            lowerCamelCase__ : Optional[int] =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
        lowerCamelCase__ : Optional[Any] ={
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs

    def snake_case ( self : Tuple )-> Dict:
        # End-to-end CPU smoke test: one inference step, check output shape
        # and a fixed 3x3 corner slice of the first rendered frame.
        lowerCamelCase__ : List[Any] ='''cpu'''
        lowerCamelCase__ : Optional[Any] =self.get_dummy_components()
        lowerCamelCase__ : Union[str, Any] =self.pipeline_class(**_lowerCAmelCase )
        lowerCamelCase__ : List[str] =pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ : int =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
        lowerCamelCase__ : int =output.images[0]
        lowerCamelCase__ : Tuple =image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        lowerCamelCase__ : Tuple =np.array(
            [
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self : Optional[Any] )-> Optional[Any]:
        # Batch-consistency check supplied by the tester mixin.
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def snake_case ( self : Optional[int] )-> str:
        # Single-vs-batched identity check (relaxed tolerances on CPU).
        lowerCamelCase__ : List[Any] =torch_device == '''cpu'''
        lowerCamelCase__ : List[str] =True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=_lowerCAmelCase, relax_max_difference=_lowerCAmelCase, )

    def snake_case ( self : Tuple )-> int:
        # num_images_per_prompt: output batch must be batch * images-per-prompt.
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : Tuple =self.pipeline_class(**_lowerCAmelCase )
        lowerCamelCase__ : int =pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ : Dict =1
        lowerCamelCase__ : List[Any] =2
        lowerCamelCase__ : List[str] =self.get_dummy_inputs(_lowerCAmelCase )
        for key in inputs.keys():
            if key in self.batch_params:
                # Replicate batchable inputs to the requested batch size.
                lowerCamelCase__ : Tuple =batch_size * [inputs[key]]
        lowerCamelCase__ : Optional[int] =pipe(**_lowerCAmelCase, num_images_per_prompt=_lowerCAmelCase )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration test: runs the real ``openai/shap-e`` checkpoint
    and compares the rendered frames against a reference numpy artifact.

    NOTE(review): locals are assigned to ``lowerCamelCase__`` but later
    statements read ``pipe``/``images`` and pass ``_lowerCAmelCase`` —
    automated-renaming artifacts; confirm against the upstream test.
    """

    def snake_case ( self : Optional[int] )-> Tuple:
        # Presumably overrides ``tearDown`` (name mangled): free GPU memory
        # between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Tuple )-> Optional[Any]:
        # Full 64-step generation for the prompt "a shark"; verifies output
        # shape and mean pixel difference vs the stored reference rendering.
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        lowerCamelCase__ : List[str] =ShapEPipeline.from_pretrained('''openai/shap-e''' )
        lowerCamelCase__ : List[Any] =pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ : Dict =torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =pipe(
            '''a shark''', generator=_lowerCAmelCase, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(_lowerCAmelCase, _lowerCAmelCase )
| 709 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
    """Convert a T5X (tax) checkpoint into a Flax T5/LongT5 model and save it.

    The three positional arguments are (in intended order): the T5X checkpoint
    path, the config name, and the Flax output folder — see the ``__main__``
    block at the bottom of this section.

    NOTE(review): this function shows heavy automated-renaming damage:
    (1) all three parameters share the name ``__lowerCamelCase``, which is a
    SyntaxError (duplicate argument) in Python; (2) every local is bound to
    ``lowerCamelCase__`` while later statements read descriptive names
    (``tax_attention_key``, ``flax_model_encoder_layer_block``, …) that are
    never defined; (3) a few reads use a ``txa_`` prefix (e.g.
    ``txa_mlp_layer_norm``) where siblings use ``tax_``. The original
    per-layer weight-copy logic should be restored from the upstream
    conversion script before use.
    """
    # Load HF config, build an empty Flax model, and load the raw T5X weights.
    lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
    lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase )
    # T5 v1.1-style checkpoints split the MLP input projection into wi_0/wi_1.
    lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    # Pick the encoder self-attention module name by model/attention type.
    if config.model_type == "t5":
        lowerCamelCase__ : List[str] ='''SelfAttention'''
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention: pull k/o/q/v projection kernels for this layer.
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
        # Global input layer norm (transient-global attention only)
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
        # Layer Normalization
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
        if split_mlp_wi:
            lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning: copy the extracted arrays into the Flax param tree.
        lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : int =tax_attention_key
        lowerCamelCase__ : Optional[int] =tax_attention_out
        lowerCamelCase__ : List[Any] =tax_attention_query
        lowerCamelCase__ : Optional[Any] =tax_attention_value
        lowerCamelCase__ : List[str] =tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : Optional[int] =tax_global_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
        else:
            lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
        lowerCamelCase__ : str =tax_mlp_wo
        lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
        lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
    # Only for layer 0: relative position bias embeddings (transposed).
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : str =tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
    # Assigning final encoder layer norm.
    lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    lowerCamelCase__ : List[Any] =tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]
        # Encoder-Decoder-Attention (cross-attention) projections.
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
        lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
        lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
        # MLP
        if split_mlp_wi:
            lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning: mirror of the encoder copy, plus cross-attention weights.
        lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : Union[str, Any] =tax_attention_key
        lowerCamelCase__ : str =tax_attention_out
        lowerCamelCase__ : Optional[int] =tax_attention_query
        lowerCamelCase__ : Dict =tax_attention_value
        lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
        lowerCamelCase__ : Any =tax_enc_dec_attention_out
        lowerCamelCase__ : Any =tax_enc_dec_attention_query
        lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
        lowerCamelCase__ : Dict =tax_cross_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Tuple =tax_mlp_wi_a
            lowerCamelCase__ : int =tax_mlp_wi_a
        else:
            lowerCamelCase__ : List[Any] =tax_mlp_wi
        lowerCamelCase__ : Dict =tax_mlp_wo
        # NOTE(review): ``txa_`` prefix here where siblings use ``tax_``.
        lowerCamelCase__ : Tuple =txa_mlp_layer_norm
        lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
    # Decoder Normalization
    lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    lowerCamelCase__ : int =txa_decoder_norm
    # Only for layer 0: decoder relative position bias embeddings (transposed).
    lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
    # Token Embeddings
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
    lowerCamelCase__ : Dict =txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    # Persist the populated Flax model to the output folder.
    flax_model.save_pretrained(__lowerCamelCase )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    # CLI entry point for the T5X -> Flax conversion defined above.
    _lowercase : Tuple = argparse.ArgumentParser()
    # Fix: the ``add_argument``/``parse_args`` calls below reference ``parser``
    # and ``args``, which were never bound (NameError at runtime) — bind them
    # as aliases so the existing ``_lowercase`` assignments keep working.
    parser = _lowercase
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    _lowercase : List[Any] = parser.parse_args()
    args = _lowercase
    # Fix: the converter defined above is ``snake_case__`` —
    # ``convert_tax_checkpoint_to_flax`` does not exist in this module.
    # Fix: argparse derives the attribute ``t5x_checkpoint_path`` from
    # ``--t5x_checkpoint_path``; ``args.tax_checkpoint_path`` raised
    # AttributeError.
    snake_case__(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def snake_case__ ( __lowerCamelCase : Tuple ):
    """Convert a TensorFlow GPTSAN checkpoint into a PyTorch state dict.

    Walks every variable in the TF checkpoint, maps its name onto the
    corresponding PyTorch parameter name (MoE experts, MLP, layer norms,
    attention q/k/v/out, embeddings, LM head), transposes kernels where
    Mesh-TensorFlow and PyTorch layouts differ, and saves the resulting
    state dict with ``torch.save``.

    NOTE(review): automated-renaming damage — the body never uses its
    parameter; it reads a module-level ``args`` (with ``tf_model_dir`` and
    ``output`` attributes, see the ``__main__`` block below), binds every
    local to ``lowerCamelCase__``, and passes the undefined name
    ``snake_case_`` to ``torch.tensor``/``torch.save``. Restore the intended
    locals (``new_state``, ``vnp``/``state``, ``name``) from the upstream
    script before relying on this.
    """
    # Sanity-check the checkpoint's parameters.json before converting.
    lowerCamelCase__ : Optional[Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
    lowerCamelCase__ : Optional[Any] =json.loads(open(snake_case_ ).read() )
    if not params:
        raise ValueError(
            f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    # Ensure the output path carries a .pt extension.
    if not args.output.endswith('''.pt''' ):
        lowerCamelCase__ : Union[str, Any] =args.output + """.pt"""
    lowerCamelCase__ : List[Any] =OrderedDict()
    # Read the TF checkpoint on CPU and translate variable by variable.
    with tf.device('''/CPU:0''' ):
        lowerCamelCase__ : Optional[Any] =tf.train.load_checkpoint(args.tf_model_dir )
        lowerCamelCase__ : Optional[int] =reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            lowerCamelCase__ : Dict =reader.get_tensor(snake_case_ ).astype(np.floataa )
            # Skip Adam optimizer moment slots — only model weights are kept.
            if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
                continue
            if key_name.startswith('''pasts/''' ):
                if key_name.startswith('''pasts/mlp''' ):
                    lowerCamelCase__ : List[Any] =int(key_name[9] )
                elif key_name.startswith('''pasts/out''' ):
                    lowerCamelCase__ : List[str] =8
                lowerCamelCase__ : Dict ="""model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
                lowerCamelCase__ : Optional[int] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                lowerCamelCase__ : List[str] =torch.tensor(snake_case_ )
            elif key_name.startswith('''model/moe''' ):
                # Mixture-of-experts block: router, soft bypass, expert MLPs.
                lowerCamelCase__ : Optional[Any] =int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/switch_gating/kernel''' ):
                    lowerCamelCase__ : Optional[int] ="""model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
                    lowerCamelCase__ : List[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : int =torch.tensor(snake_case_ )
                elif key_name.endswith('''/softmlp/kernel''' ):
                    lowerCamelCase__ : Optional[Any] ="""model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
                    lowerCamelCase__ : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : Optional[int] =torch.tensor(snake_case_ )
                elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
                    # One TF array holds all 16 experts; split per expert.
                    lowerCamelCase__ : List[str] =key_name[-9:-7]
                    for i in range(16 ):
                        lowerCamelCase__ : Dict ="""model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
                        lowerCamelCase__ : Optional[int] =(
                            vnp[i].transpose([1, 0] ).copy()
                        ) # In Mesh-Tensorflow, it is one array, so it is divided
                        lowerCamelCase__ : Optional[Any] =torch.tensor(snake_case_ )
            elif key_name.startswith('''model/mlp''' ):
                # Dense (non-MoE) feed-forward block: wi/wo kernels and biases.
                lowerCamelCase__ : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/p1/kernel''' ):
                    lowerCamelCase__ : Tuple ="""model.blocks.%d.feed_forward.mlp.wi.weight""" % player
                    lowerCamelCase__ : Union[str, Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : List[str] =torch.tensor(snake_case_ )
                elif key_name.endswith('''/p1/bias''' ):
                    lowerCamelCase__ : List[Any] ="""model.blocks.%d.feed_forward.mlp.wi.bias""" % player
                    lowerCamelCase__ : Optional[Any] =vnp.copy() # same because it is one dimensional
                    lowerCamelCase__ : Optional[Any] =torch.tensor(snake_case_ )
                elif key_name.endswith('''/p2/kernel''' ):
                    lowerCamelCase__ : Optional[Any] ="""model.blocks.%d.feed_forward.mlp.wo.weight""" % player
                    lowerCamelCase__ : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : int =torch.tensor(snake_case_ )
                elif key_name.endswith('''/p2/bias''' ):
                    lowerCamelCase__ : List[Any] ="""model.blocks.%d.feed_forward.mlp.wo.bias""" % player
                    lowerCamelCase__ : List[str] =vnp.copy() # same because it is one dimensional
                    lowerCamelCase__ : List[str] =torch.tensor(snake_case_ )
            elif key_name.startswith('''model/ln''' ):
                # Feed-forward layer norm (bias ``/b`` and scale ``/g``).
                lowerCamelCase__ : Tuple =int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    lowerCamelCase__ : Any ="""model.blocks.%d.feed_forward.norm.bias""" % player
                    lowerCamelCase__ : Optional[int] =vnp.copy() # same because it is one dimensional
                    lowerCamelCase__ : str =torch.tensor(snake_case_ )
                elif key_name.endswith('''/g''' ):
                    lowerCamelCase__ : int ="""model.blocks.%d.feed_forward.norm.weight""" % player
                    lowerCamelCase__ : List[Any] =vnp.copy() # same because it is one dimensional
                    lowerCamelCase__ : Tuple =torch.tensor(snake_case_ )
            elif key_name.startswith('''model/att''' ):
                # Attention block: fused qkv kernel is split into q/k/v weights.
                lowerCamelCase__ : Optional[int] =int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/qkv/kernel''' ):
                    lowerCamelCase__ : Any =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
                    lowerCamelCase__ : List[Any] =state[:, 0, :, :]
                    lowerCamelCase__ : Any =state[:, 1, :, :]
                    lowerCamelCase__ : int =state[:, 2, :, :]
                    lowerCamelCase__ : List[str] =(
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : str =(
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : Tuple =(
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : Any ="""model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
                    lowerCamelCase__ : Optional[Any] =torch.tensor(snake_case_ )
                    lowerCamelCase__ : Optional[int] ="""model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
                    lowerCamelCase__ : int =torch.tensor(snake_case_ )
                    lowerCamelCase__ : Optional[int] ="""model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
                    lowerCamelCase__ : Any =torch.tensor(snake_case_ )
                elif key_name.endswith('''/o/kernel''' ):
                    lowerCamelCase__ : int ="""model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
                    lowerCamelCase__ : int =(
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase__ : List[str] =torch.tensor(snake_case_ )
            elif key_name.startswith('''model/an''' ):
                # Attention layer norm (bias ``/b`` and scale ``/g``).
                lowerCamelCase__ : Dict =int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    lowerCamelCase__ : Optional[Any] ="""model.blocks.%d.self_attn.norm.bias""" % player
                    lowerCamelCase__ : Dict =vnp.copy() # same because it is one dimensional
                    lowerCamelCase__ : int =torch.tensor(snake_case_ )
                elif key_name.endswith('''/g''' ):
                    lowerCamelCase__ : Tuple ="""model.blocks.%d.self_attn.norm.weight""" % player
                    lowerCamelCase__ : Union[str, Any] =vnp.copy() # same because it is one dimensional
                    lowerCamelCase__ : Optional[Any] =torch.tensor(snake_case_ )
            elif (
                key_name.startswith('''model/wte''' )
                or key_name.startswith('''model/wpe''' )
                or key_name.startswith('''model/ete''' )
            ):
                # Token / position / extra-position embedding tables.
                lowerCamelCase__ : Union[str, Any] ={"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
                    key_name[-3:]
                ]
                lowerCamelCase__ : Dict ="""model.%s.weight""" % nlayer
                lowerCamelCase__ : Tuple =vnp.copy() # same in embedded
                lowerCamelCase__ : Union[str, Any] =torch.tensor(snake_case_ )
                if key_name.startswith('''model/wte''' ):
                    # Token embeddings are tied to the LM head.
                    lowerCamelCase__ : Dict ="""lm_head.weight"""
                    lowerCamelCase__ : Union[str, Any] =vnp.copy() # same in embedded
                    lowerCamelCase__ : Optional[int] =torch.tensor(snake_case_ )
            elif key_name.startswith('''model/wob''' ):
                # Output bias, reshaped to a (1, vocab) row vector.
                lowerCamelCase__ : Union[str, Any] ="""final_logits_bias"""
                lowerCamelCase__ : Tuple =vnp.copy() # same in embedded
                lowerCamelCase__ : Optional[Any] =state.reshape((1, -1) )
                lowerCamelCase__ : str =torch.tensor(snake_case_ )
            elif key_name == "model/dense/kernel":
                lowerCamelCase__ : Dict ="""model.last_project.weight"""
                lowerCamelCase__ : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                lowerCamelCase__ : Optional[Any] =torch.tensor(snake_case_ )
            elif key_name == "model/dense_1/bias":
                lowerCamelCase__ : Dict ="""model.last_project.bias"""
                lowerCamelCase__ : Union[str, Any] =vnp.copy() # same because it is one dimensional
                lowerCamelCase__ : List[str] =torch.tensor(snake_case_ )
    # Write the assembled state dict to the requested output path.
    torch.save(snake_case_ , args.output )
if __name__ == "__main__":
    # CLI entry point for the TF GPTSAN -> PyTorch conversion defined above.
    _lowercase : Optional[Any] = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Fix: the ``add_argument`` calls below reference ``parser``, which was
    # never bound (NameError at runtime) — alias it to the parser instance.
    parser = _lowercase
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    _lowercase : List[str] = parser.parse_args()
    # Fix: the converter body reads the module-level name ``args``
    # (``args.tf_model_dir``/``args.output``), which was never bound — bind it
    # to the parsed namespace.
    args = _lowercase
    # Fix: the converter defined above is ``snake_case__`` —
    # ``convert_tf_gptsan_to_pt`` does not exist in this module.
    snake_case__(args)
| 710 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    """Model tester that builds tiny ConvNext-backbone UperNet configs and
    inputs for the semantic-segmentation tests below.

    NOTE(review): automated-renaming damage — ``__init__`` declares every
    parameter as ``lowerCamelCase`` (duplicate argument names are a
    SyntaxError in Python), some method bodies reference the bare name
    ``lowerCamelCase`` with no such parameter in scope, and the annotated
    tuple unpack in the last method (``(...) : Any = ...``) is also invalid
    syntax. Restore the descriptive parameter names from the upstream
    tester before use.
    """

    def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
        # Store the test hyper-parameters (batch size, image size, backbone
        # depths/widths, labels, …) on the tester instance.
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : str =image_size
        lowerCamelCase__ : Any =num_channels
        lowerCamelCase__ : Tuple =num_stages
        lowerCamelCase__ : List[str] =hidden_sizes
        lowerCamelCase__ : Any =depths
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Dict =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Any =out_features
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[int] =scope
        lowerCamelCase__ : Optional[int] =num_stages

    def snake_case ( self : str )-> Optional[int]:
        # Random pixel values (and optional labels) plus a fresh config.
        lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple =None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : int =self.get_config()
        return config, pixel_values, labels

    def snake_case ( self : Union[str, Any] )-> Any:
        # Tiny ConvNext backbone config for the UperNet head.
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    def snake_case ( self : Union[str, Any] )-> Any:
        # Full UperNet config wrapping the backbone config above.
        # NOTE(review): ``lowerCamelCase`` is not defined in this scope —
        # renaming artifact (booleans were presumably intended here).
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )

    def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
        # Forward pass check: logits must be (batch, num_labels, H, W).
        lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def snake_case ( self : Any )-> Tuple:
        # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
        lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Presumably setUp (name mangled): create the model tester and the
        # config tester used by the methods below.
        # NOTE(review): ``lowerCamelCase`` is not defined in this scope —
        # automated-renaming artifact; confirm intended arguments upstream.
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Run the standard battery of config serialization/round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def snake_case ( self : List[str] )-> Dict:
        # Intentionally a no-op placeholder (common-properties hook).
        return
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
lowerCamelCase__ : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : List[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : Any )-> Union[str, Any]:
lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def snake_case ( self : Optional[Any] )-> List[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def snake_case ( self : Any )-> List[str]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : int )-> Any:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : Dict )-> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case ( self : List[Any] )-> List[str]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Tuple )-> str:
pass
def snake_case ( self : Optional[int] )-> List[str]:
def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : List[str] =self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Any )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def snake_case ( self : Any )-> str:
pass
@slow
def snake_case ( self : int )-> Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
    """Download the ADE20k fixture image from the Hub and return it as an RGB PIL image.

    NOTE(review): the original bound the download path and opened image to
    throwaway auto-renamed locals while referencing `__lowerCamelCase` and
    `image`, which were never defined (NameError). Restored the intended
    filepath -> image flow; `hf_hub_download` and `Image` come from the
    module-level imports of the full file.
    """
    filepath =hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image =Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration tests: run openmmlab UperNet checkpoints (swin-tiny and
    convnext-tiny backbones) on the ADE20k fixture image and compare a 3x3 logit
    slice against recorded values. NOTE(review): locals are bound to auto-renamed
    variables while later lines read `model`/`processor`/`outputs`, and
    `lowerCamelCase` is undefined in these scopes -- damage from an automated
    rename; confirm against the original file before running.'''
    def snake_case ( self : str )-> Union[str, Any]:
        # Swin-tiny backbone checkpoint.
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        # Logits are upsampled back to the processed 512x512 resolution.
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # ConvNext-tiny backbone checkpoint; same pipeline, different expected slice.
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow Flax integration test: run the stable-diffusion-2 inpainting pipeline
    across all local devices (pmap via jit=...) and compare an image slice
    against recorded values. NOTE(review): the method bodies reference
    `UpperCamelCase__`, `prompt`, `init_image`, `pipeline`, etc., which were
    bound to different auto-renamed locals -- damage from an automated rename;
    confirm against the original file before running.'''
    def snake_case ( self : List[Any] )-> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def snake_case ( self : Tuple )-> Tuple:
        # Fetch the fixture init image and mask from the Hub.
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        lowerCamelCase__ : Optional[int] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        lowerCamelCase__ : List[Any] ='''xvjiarui/stable-diffusion-2-inpainting'''
        lowerCamelCase__ : Dict =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase__, safety_checker=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] ='''Face of a yellow cat, high resolution, sitting on a park bench'''
        lowerCamelCase__ : List[str] =jax.random.PRNGKey(0 )
        lowerCamelCase__ : Dict =50
        # Replicate prompt/images once per device for pmapped execution.
        lowerCamelCase__ : Any =jax.device_count()
        lowerCamelCase__ : str =num_samples * [prompt]
        lowerCamelCase__ : int =num_samples * [init_image]
        lowerCamelCase__ : Tuple =num_samples * [mask_image]
        lowerCamelCase__ : int =pipeline.prepare_inputs(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
        # shard inputs and rng
        lowerCamelCase__ : Optional[Any] =replicate(UpperCamelCase__ )
        lowerCamelCase__ : Any =jax.random.split(UpperCamelCase__, jax.device_count() )
        lowerCamelCase__ : Any =shard(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] =shard(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] =shard(UpperCamelCase__ )
        lowerCamelCase__ : int =pipeline(
            UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, jit=UpperCamelCase__ )
        # Collapse the device axis and compare a 3x3 slice of the last channel.
        lowerCamelCase__ : int =output.images.reshape(UpperCamelCase__, 512, 512, 3 )
        lowerCamelCase__ : Union[str, Any] =images[0, 253:256, 253:256, -1]
        lowerCamelCase__ : Dict =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCamelCase__ : Union[str, Any] =jnp.array(
            [0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 711 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
    """Import-guard placeholder: any use raises a helpful error explaining that
    the `onnx` backend is required (via `requires_backends`).

    NOTE(review): the original methods declared `*lowerCamelCase` and
    `**lowerCamelCase` with the same identifier, which is a SyntaxError;
    restored to conventional *args/**kwargs. Behavior is unchanged.
    """
    _a = ['onnx']
    def __init__( self, *args, **kwargs )-> Optional[int]:
        # Fail fast on instantiation when `onnx` is missing.
        requires_backends(self, ['''onnx'''] )
    @classmethod
    def snake_case ( cls, *args, **kwargs )-> Optional[int]:
        requires_backends(cls, ['''onnx'''] )
    @classmethod
    def snake_case ( cls, *args, **kwargs )-> Optional[int]:
        # NOTE(review): this redefinition shadows the previous `snake_case`;
        # both were presumably distinct classmethods (from_config /
        # from_pretrained) before an automated rename.
        requires_backends(cls, ['''onnx'''] )
| 625 | 0 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def snake_case__ ( model ):
    """Return a representative first-block MLP linear layer from `model`.

    GPT-2 stores it as `c_fc`; other architectures here (e.g. bloom) expose
    `dense_ah_to_h`. NOTE(review): the parameter was auto-renamed to
    `__lowerCamelCase` while the body read `model` (NameError); restored to
    `model`. The attribute name `dense_ah_to_h` is kept as-is from the source
    (presumably `dense_4h_to_h` originally -- confirm).
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Wrap a (frozen) linear `module` with a trainable low-rank LoRA-style adapter.

    The adapter is Linear(in_features -> rank) followed by
    Linear(rank -> out_features), both bias-free. The second projection is
    zero-initialized, so immediately after construction the wrapper's output
    equals the wrapped module's output.

    NOTE(review): the original declared duplicate parameter names (a
    SyntaxError) and assigned to throwaway locals while later lines read
    `self.adapter`; restored to the intended attribute assignments.
    """
    def __init__( self, module, rank )-> None:
        super().__init__()
        self.module =module
        # Down-project to `rank`, then up-project back; no biases.
        self.adapter =nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False ), nn.Linear(rank, module.out_features, bias=False ), )
        # Small init for the down-projection; zeros for the up-projection.
        small_std =(2.0 / (5 * min(module.in_features, module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self, input, *args, **kwargs ):
        # Adapter starts at zero, so initially this equals the wrapped output.
        return self.module(input, *args, **kwargs ) + self.adapter(input )
    # Backward-compatible alias for the auto-renamed method name.
    snake_case = forward
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Base fixture for the 4-bit/int8 quantization tests below: checkpoint name,
    expected fp16/4-bit memory ratio, prompt and accepted generations.
    NOTE(review): every attribute is bound to the same name `_a` (only the last
    survives) and `EXPECTED_OUTPUTS` is referenced but never bound under that
    name, which raises NameError at class creation -- damage from an automated
    rename (presumably model_name / EXPECTED_RELATIVE_DIFFERENCE / input_text /
    EXPECTED_OUTPUTS / MAX_NEW_TOKENS); confirm against the original file.'''
    _a = "bigscience/bloom-1b7"
    # Constant values
    _a = 2.109659552692574
    _a = "Hello my name is"
    _a = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    _a = 1_0
    def snake_case ( self : Optional[int] )-> List[str]:
        # Shared tokenizer for all subclasses.
        lowerCamelCase__ : Union[str, Any] =AutoTokenizer.from_pretrained(self.model_name )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    '''Core 4-bit quantization tests: memory footprint, parameter dtypes,
    generation quality, config round-trips and unsupported-operation errors.
    NOTE(review): bodies reference `_lowerCamelCase`, `config`, `encoded_input`,
    etc. that were bound to different auto-renamed locals -- damage from an
    automated rename; confirm against the original file before running.'''
    def snake_case ( self : List[str] )-> str:
        super().setUp()
        # Models and tokenizer
        lowerCamelCase__ : List[str] =AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map='''auto''' )
        lowerCamelCase__ : List[str] =AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
    def snake_case ( self : Optional[int] )-> int:
        # Free both models and flush CUDA memory between tests.
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Tuple )-> str:
        # Quantization config must exist and serialize in all three forms.
        lowerCamelCase__ : Union[str, Any] =self.model_abit.config
        self.assertTrue(hasattr(_lowerCamelCase, '''quantization_config''' ) )
        lowerCamelCase__ : Optional[Any] =config.to_dict()
        lowerCamelCase__ : Optional[Any] =config.to_diff_dict()
        lowerCamelCase__ : Optional[int] =config.to_json_string()
    def snake_case ( self : List[Any] )-> Union[str, Any]:
        from bitsandbytes.nn import Paramsabit
        # Quantized model must be smaller by the expected ratio, and its linear
        # weights must be bitsandbytes parameters.
        lowerCamelCase__ : List[Any] =self.model_fpaa.get_memory_footprint()
        lowerCamelCase__ : str =self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE )
        lowerCamelCase__ : List[Any] =get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def snake_case ( self : List[str] )-> Tuple:
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(_lowerCamelCase, torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def snake_case ( self : Union[str, Any] )-> Tuple:
        # Generation from the quantized model should match an accepted output.
        lowerCamelCase__ : Tuple =self.tokenizer(self.input_text, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=_lowerCamelCase ), self.EXPECTED_OUTPUTS )
    def snake_case ( self : Optional[int] )-> Tuple:
        # Same check but loading through an explicit BitsAndBytesConfig.
        lowerCamelCase__ : Optional[int] =BitsAndBytesConfig()
        lowerCamelCase__ : List[str] =True
        lowerCamelCase__ : Tuple =AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=_lowerCamelCase, device_map='''auto''' )
        lowerCamelCase__ : Dict =self.tokenizer(self.input_text, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =model_abit_from_config.generate(
            input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=_lowerCamelCase ), self.EXPECTED_OUTPUTS )
    def snake_case ( self : List[str] )-> Any:
        # Saving a 4-bit model is unsupported and must raise.
        with self.assertRaises(_lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(_lowerCamelCase )
    def snake_case ( self : Any )-> Dict:
        # Conflicting quantization arguments must raise.
        lowerCamelCase__ : int =BitsAndBytesConfig()
        with self.assertRaises(_lowerCamelCase ):
            lowerCamelCase__ : List[str] =AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=_lowerCamelCase, load_in_abit=_lowerCamelCase, device_map='''auto''', bnb_abit_quant_type='''nf4''', )
    def snake_case ( self : Union[str, Any] )-> Optional[int]:
        # Casting / moving a quantized model must raise; the fp16 model stays usable.
        with self.assertRaises(_lowerCamelCase ):
            # Tries with `str`
            self.model_abit.to('''cpu''' )
        with self.assertRaises(_lowerCamelCase ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(_lowerCamelCase ):
            # Tries with a `device`
            self.model_abit.to(torch.device('''cuda:0''' ) )
        with self.assertRaises(_lowerCamelCase ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(_lowerCamelCase ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        lowerCamelCase__ : str =self.tokenizer(self.input_text, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.model_fpaa.to(torch.floataa )
        lowerCamelCase__ : Optional[int] =self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
        # Check this does not throw an error
        lowerCamelCase__ : Union[str, Any] =self.model_fpaa.to('''cpu''' )
        # Check this does not throw an error
        lowerCamelCase__ : Optional[int] =self.model_fpaa.half()
        # Check this does not throw an error
        lowerCamelCase__ : Tuple =self.model_fpaa.float()
    def snake_case ( self : List[str] )-> Optional[Any]:
        # fp32 keep-modules (T5 wo projection) must stay fp32 under 4-bit loading.
        lowerCamelCase__ : Optional[int] =AutoModelForSeqaSeqLM.from_pretrained('''t5-small''', load_in_abit=_lowerCamelCase, device_map='''auto''' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''T5-specific 4-bit tests: generation with and without the fp32
    keep-in-modules list. NOTE(review): bodies reference `_lowerCamelCase` and
    attributes like `self.dense_act_model_name` that were bound to auto-renamed
    locals/`_a` -- damage from an automated rename; confirm against the
    original file.'''
    @classmethod
    def snake_case ( cls : int )-> List[Any]:
        # Class-level fixtures: checkpoint names, tokenizer and prompt.
        lowerCamelCase__ : Dict ='''t5-small'''
        lowerCamelCase__ : int ='''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
        lowerCamelCase__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
        lowerCamelCase__ : Any ='''Translate in German: Hello, my dog is cute'''
    def snake_case ( self : Tuple )-> str:
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Optional[int] )-> List[Any]:
        from transformers import TaForConditionalGeneration
        # Temporarily clear the fp32 keep-modules list, generate, then restore it.
        lowerCamelCase__ : Optional[Any] =TaForConditionalGeneration._keep_in_fpaa_modules
        lowerCamelCase__ : List[Any] =None
        # test with `t5-small`
        lowerCamelCase__ : Dict =TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        lowerCamelCase__ : Tuple =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
        lowerCamelCase__ : Optional[Any] =model.generate(**_lowerCamelCase )
        # test with `flan-t5-small`
        lowerCamelCase__ : int =TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        lowerCamelCase__ : Tuple =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
        lowerCamelCase__ : Union[str, Any] =model.generate(**_lowerCamelCase )
        lowerCamelCase__ : Optional[int] =modules
    def snake_case ( self : Optional[int] )-> List[Any]:
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        lowerCamelCase__ : Dict =TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit ) )
        lowerCamelCase__ : Dict =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
        lowerCamelCase__ : int =model.generate(**_lowerCamelCase )
        # test with `flan-t5-small`
        lowerCamelCase__ : Optional[Any] =TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        lowerCamelCase__ : Union[str, Any] =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
        lowerCamelCase__ : Optional[Any] =model.generate(**_lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    '''Checks 4-bit loading across model heads (base / sequence-classification /
    causal-LM / seq2seq): linear weights quantized, head weights left as
    nn.Parameter. NOTE(review): locals are bound to auto-renamed variables while
    tearDown reads `self.base_model` etc. -- damage from an automated rename;
    confirm against the original file.'''
    def snake_case ( self : Dict )-> Union[str, Any]:
        super().setUp()
        # model_name
        lowerCamelCase__ : Any ='''bigscience/bloom-560m'''
        lowerCamelCase__ : List[Any] ='''t5-small'''
        # Different types of model
        lowerCamelCase__ : List[Any] =AutoModel.from_pretrained(self.model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        # Sequence classification model
        lowerCamelCase__ : Union[str, Any] =AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        # CausalLM model
        lowerCamelCase__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
        # Seq2seq model
        lowerCamelCase__ : str =AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=_lowerCamelCase, device_map='''auto''' )
    def snake_case ( self : Any )-> Union[str, Any]:
        # Free all four models and flush CUDA memory.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Any )-> str:
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    '''Runs a text-generation `pipeline` with a 4-bit model and checks the output
    against the accepted generations. NOTE(review): the pipeline is bound to an
    auto-renamed local while tearDown/the test read `self.pipe` -- damage from
    an automated rename; confirm against the original file.'''
    def snake_case ( self : Optional[int] )-> Tuple:
        super().setUp()
    def snake_case ( self : int )-> Dict:
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : int )-> Union[str, Any]:
        lowerCamelCase__ : str =pipeline(
            '''text-generation''', model=self.model_name, model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa}, max_new_tokens=self.MAX_NEW_TOKENS, )
        # Real second forward pass
        lowerCamelCase__ : Dict =self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['''generated_text'''], self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    '''Multi-GPU test: 4-bit model with a `balanced` device map must spread over
    two devices and still generate an accepted output.'''
    def snake_case ( self : int )-> Optional[Any]:
        super().setUp()
    def snake_case ( self : int )-> Optional[Any]:
        lowerCamelCase__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=_lowerCamelCase, device_map='''balanced''' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ), {0, 1} )
        # Check that inference pass works on the model
        lowerCamelCase__ : Dict =self.tokenizer(self.input_text, return_tensors='''pt''' )
        # Second real batch
        lowerCamelCase__ : List[str] =model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=_lowerCamelCase ), self.EXPECTED_OUTPUTS )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    '''Training smoke-test: freeze a 4-bit model, attach LoRA adapters to the
    attention projections, run one backward pass, and check adapter gradients
    flow while embeddings get none. NOTE(review): bodies reference
    `_lowerCamelCase` and `LoRALayer`, which were auto-renamed in this file --
    confirm against the original file before running.'''
    def snake_case ( self : Optional[Any] )-> int:
        lowerCamelCase__ : List[Any] ='''facebook/opt-350m'''
        super().setUp()
    def snake_case ( self : Any )-> str:
        # Gradient support for 4-bit requires bitsandbytes >= 0.37.0.
        if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
            return
        # Step 1: freeze all parameters
        lowerCamelCase__ : Any =AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_lowerCamelCase )
        self.assertEqual(set(model.hf_device_map.values() ), {torch.cuda.current_device()} )
        for param in model.parameters():
            lowerCamelCase__ : Dict =False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                lowerCamelCase__ : Optional[int] =param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(_lowerCamelCase ) ):
                lowerCamelCase__ : List[str] =LoRALayer(module.q_proj, rank=16 )
                lowerCamelCase__ : Dict =LoRALayer(module.k_proj, rank=16 )
                lowerCamelCase__ : str =LoRALayer(module.v_proj, rank=16 )
        # Step 3: dummy batch
        lowerCamelCase__ : str =self.tokenizer('''Test batch ''', return_tensors='''pt''' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            lowerCamelCase__ : Dict =model.forward(**_lowerCamelCase )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(_lowerCamelCase, _lowerCamelCase ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(_lowerCamelCase, nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    '''GPT-2 variant of the suite above: overrides the checkpoint name and the
    expected fp16/4-bit memory ratio. NOTE(review): both attributes are bound
    to `_a` (only the last survives) -- presumably model_name /
    EXPECTED_RELATIVE_DIFFERENCE before an automated rename.'''
    _a = "gpt2-xl"
    _a = 3.3191854854152187
| 712 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( x: float , y: float , max_step: int ):
    """Return the normalized Mandelbrot escape step for the point (x, y).

    Iterates z -> z^2 + c (with c = x + iy, tracked as real part `a` and
    imaginary part `b`) up to `max_step` times. Returns step/(max_step - 1):
    1.0 means the point never diverged (inside the set), values near 0 mean
    immediate divergence.

    NOTE(review): the original declared all three parameters with one
    auto-renamed identifier (a SyntaxError) while the body read x/y/max_step;
    names restored. Requires max_step >= 2 (max_step == 1 divides by zero,
    as in the original).
    """
    a =x
    b =y
    for step in range(max_step ): # noqa: B007
        a_new =a * a - b * b + x
        b =2 * a * b + y
        a =a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case__ ( distance: float ):
    """Black-and-white coloring: black for points inside the set, white outside.

    `distance == 1` marks a point that never diverged within max_step
    iterations (see the escape-step helper).

    NOTE(review): the parameter was auto-renamed while the body read
    `distance` (NameError); name restored.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def snake_case__ ( distance: float ):
    """Distance-coded coloring: black inside the set, otherwise an RGB hue
    derived from the escape distance via HSV (full saturation and value).

    NOTE(review): the original mixed an auto-renamed parameter with body
    references to `distance` (NameError); name unified to `distance`.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def snake_case__ ( image_width: int = 800 , image_height: int = 600 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , ):
    """Render the Mandelbrot set into a PIL RGB image and return it.

    image_width/image_height: output size in pixels.
    figure_center_*/figure_width: the region of the complex plane shown.
    max_step: escape-iteration budget per pixel.
    use_distance_color_coding: hue-coded coloring when True, black/white when False.

    NOTE(review): the original declared seven parameters with one auto-renamed
    identifier (a SyntaxError) and bound pixel colors to throwaway locals
    instead of writing them into the image; restored the intended parameter
    names and `pixels[...] = ...` stores. The helpers get_distance /
    get_color_coded_rgb / get_black_and_white_rgb are referenced as in the
    original body -- in this file they were auto-renamed to `snake_case__`;
    confirm against the original module.
    """
    img =Image.new('''RGB''' , (image_width, image_height) )
    pixels =img.load()
    # Figure height keeps the aspect ratio of the output image (loop-invariant,
    # hoisted out of the pixel loops).
    figure_height =figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x =figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y =figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance =get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] =get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] =get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # colored version, full figure
    # NOTE(review): `get_image` is not defined under this name in this file --
    # the renderer above was auto-renamed to `snake_case__`; confirm against
    # the original module before running as a script.
    _lowercase : Optional[Any] = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    # NOTE(review): `img` is undefined here -- the render result was bound to
    # `_lowercase` above; likely `img = get_image()` before the automated rename.
    img.show()
"""simple docstring"""
from typing import Any
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str], lowerCamelCase : Optional[int] )-> List[str]:
lowerCamelCase__ : Dict =data
lowerCamelCase__ : Any =None
def __repr__( self : Optional[Any] )-> str:
return F'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] )-> Optional[Any]:
lowerCamelCase__ : str =None
def __iter__( self : str )-> Any:
lowerCamelCase__ : Optional[Any] =self.head
while node:
yield node.data
lowerCamelCase__ : Dict =node.next
def __len__( self : int )-> int:
return sum(1 for _ in self )
def __repr__( self : str )-> str:
return "->".join([str(lowerCamelCase ) for item in self] )
def __getitem__( self : Any, lowerCamelCase : str )-> Any:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : List[Any] )-> None:
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
lowerCamelCase__ : Any =self.head
for _ in range(lowerCamelCase ):
lowerCamelCase__ : Any =current.next
lowerCamelCase__ : Optional[int] =data
def snake_case ( self : List[str], lowerCamelCase : Any )-> None:
self.insert_nth(len(self ), lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Dict )-> None:
self.insert_nth(0, lowerCamelCase )
def snake_case ( self : Dict, lowerCamelCase : Dict, lowerCamelCase : Optional[int] )-> None:
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
lowerCamelCase__ : Tuple =Node(lowerCamelCase )
if self.head is None:
lowerCamelCase__ : Dict =new_node
elif index == 0:
lowerCamelCase__ : Union[str, Any] =self.head # link new_node to head
lowerCamelCase__ : str =new_node
else:
lowerCamelCase__ : int =self.head
for _ in range(index - 1 ):
lowerCamelCase__ : Tuple =temp.next
lowerCamelCase__ : List[Any] =temp.next
lowerCamelCase__ : str =new_node
def snake_case ( self : List[str] )-> None: # print every node data
print(self )
def snake_case ( self : List[str] )-> Any:
return self.delete_nth(0 )
def snake_case ( self : List[str] )-> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def snake_case ( self : Tuple, lowerCamelCase : Union[str, Any] = 0 )-> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
lowerCamelCase__ : Optional[Any] =self.head # default first node
if index == 0:
lowerCamelCase__ : Dict =self.head.next
else:
lowerCamelCase__ : Union[str, Any] =self.head
for _ in range(index - 1 ):
lowerCamelCase__ : Dict =temp.next
lowerCamelCase__ : Tuple =temp.next
lowerCamelCase__ : List[Any] =temp.next.next
return delete_node.data
def snake_case ( self : List[str] )-> bool:
return self.head is None
def snake_case ( self : List[Any] )-> None:
lowerCamelCase__ : Dict =None
lowerCamelCase__ : Union[str, Any] =self.head
while current:
# Store the current node's next node.
lowerCamelCase__ : Any =current.next
# Make the current node's next point backwards
lowerCamelCase__ : Any =prev
# Make the previous node be the current node
lowerCamelCase__ : int =current
# Make the current node the next node (to progress iteration)
lowerCamelCase__ : Optional[int] =next_node
# Return prev in order to put the head at the end
lowerCamelCase__ : Tuple =prev
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCamelCase__ : int =-i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : str =[
-9,
100,
Node(77345112 ),
"dlrow olleH",
7,
5555,
0,
-192.55555,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCamelCase__ : Any =LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCamelCase__ : Optional[int] =linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCamelCase__ : Tuple =linked_list.delete_tail()
assert result == 12.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCamelCase__ : List[str] =linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def snake_case__ ( ):
    """Interactive demo of the LinkedList class; also runs the module doctests."""
    from doctest import testmod

    testmod()
    # NOTE(review): `linked_list` was never bound in the original body (results were
    # assigned to throwaway locals); the bindings are restored below.
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the demo above is named
    # `snake_case__`; confirm the intended entry point.
    main()
| 713 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( model_name ):
    """Build a VideoMAEConfig for *model_name*, attaching label maps for fine-tuned variants.

    Raises ValueError for fine-tuned names lacking 'kinetics' or 'ssv2'.
    """
    config = VideoMAEConfig()

    # NOTE(review): the original line passed the same argument twice
    # (`set_architecture_configs(model_name, model_name)`); the upstream conversion
    # script passes (config, model_name) — restored here. Also note this file renames
    # the helper itself to `snake_case__`; confirm the intended binding.
    set_architecture_configs(config, model_name)

    if "finetuned" not in model_name:
        # Presumably disables mean pooling for pre-training checkpoints, as in the
        # upstream conversion script — TODO confirm.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.')
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def snake_case__ ( config, model_name ):
    """Set size-dependent attributes on *config* according to *model_name*.

    NOTE(review): the original signature repeated the parameter name
    `__lowerCamelCase` (a SyntaxError) and assigned every value to a throwaway
    local; the config-attribute targets are restored from the value pattern.
    Raises ValueError when the name contains none of small/base/large/huge.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def snake_case__ ( name ):
    """Map an original VideoMAE checkpoint parameter name to its HF equivalent.

    NOTE(review): the original body referenced undefined `name` (the parameter
    was `__lowerCamelCase`) and discarded every `.replace()` result, so it
    returned its input unchanged; the rebindings are restored below.
    """
    if "encoder." in name:
        name = name.replace('encoder.', '')
    if "cls_token" in name:
        name = name.replace('cls_token', 'videomae.embeddings.cls_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'videomae.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'videomae.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'videomae.embeddings.norm')
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'videomae.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name and "bias" not in name:
        name = name.replace('attn', 'attention.self')
    if "attn" in name:
        name = name.replace('attn', 'attention.attention')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight', 'videomae.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias', 'videomae.layernorm.bias')
    if "head" in name and "decoder" not in name:
        name = name.replace('head', 'classifier')
    return name
def snake_case__ ( orig_state_dict, config ):
    """Split fused qkv weights into separate query/key/value entries, in place.

    NOTE(review): the original signature repeated `__lowerCamelCase` (a
    SyntaxError) and discarded the sliced values; the per-projection state-dict
    writes are restored following the upstream conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith('encoder.'):
            key = key.replace('encoder.', '')

        if "qkv" in key:
            key_split = key.split('.')
            if key.startswith('decoder.blocks'):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    # Fused qkv weight is stacked along dim 0: query, key, value.
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = 'videomae.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
        else:
            # NOTE(review): the upstream script renames non-qkv keys through the
            # rename helper here; that helper's binding was lost in this file
            # (it is defined as `snake_case__` above) — confirm before shipping.
            orig_state_dict[key] = val

    return orig_state_dict
def snake_case__ ( ):
    """Download the sample eating-spaghetti clip and return its frames as a list.

    NOTE(review): the original called `np.load(__lowerCamelCase)` on an undefined
    name; the downloaded file path is the evident intended argument.
    """
    file_path = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
    video = np.load(file_path)
    return list(video)
def snake_case__ ( checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub ):
    """Convert an original VideoMAE checkpoint to the HF format, verify its outputs
    on a sample video, and optionally save and/or push the result.

    NOTE(review): the original signature repeated `__lowerCamelCase` four times
    (a SyntaxError) and every intermediate binding was lost; parameter names are
    restored from the argparse call site at the bottom of the file. The helpers
    `get_videomae_config`, `convert_state_dict` and `prepare_video` are defined
    as `snake_case__` in this file — confirm the intended bindings.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = 'pytorch_model.bin'
    # NOTE(review): `quiet` was an undefined name in the original; False assumed.
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location='cpu')
    if "model" in files:
        state_dict = files['model']
    else:
        state_dict = files['module']
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors='pt')

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        'videomae-small-finetuned-kinetics',
        'videomae-small-finetuned-ssv2',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        'videomae-base-short',
        'videomae-base-short-finetuned-kinetics',
        'videomae-base',
        'videomae-base-finetuned-kinetics',
        'videomae-large',
        'videomae-large-finetuned-kinetics',
        'videomae-huge-finetuned-kinetics',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        'videomae-base-short-ssv2',
        'videomae-base-short-finetuned-ssv2',
        'videomae-base-ssv2',
        'videomae-base-finetuned-ssv2',
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.92_91, -0.40_61, -0.93_07])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.26_71, -0.46_89, -0.82_35])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.51_42]) if config.norm_pix_loss else torch.tensor([0.64_69])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.07_71, 0.00_11, -0.36_25])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.24_33, 0.16_32, -0.48_94])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.65_88, 0.09_90, -0.24_93])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.05_37, -0.15_39, -0.32_66])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.19_61, -0.83_37, -0.63_89])
    else:
        raise ValueError(f'Model name not supported. Should be one of {model_names}')

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print('Logits:', logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print('Logits ok!')

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print('Loss ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were never bound in the original (results
    # went to throwaway `_lowercase` locals); the bindings are restored below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    # NOTE(review): `convert_videomae_checkpoint` is not defined under that name in
    # this file (the converter above is named `snake_case__`); confirm the callee.
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def snake_case__ ( a, b ):
    """Return whether tensor protos *a* and *b* are equal ignoring their names.

    Temporarily clears both names before comparing, then restores them, so the
    comparison covers only the tensor payload. (The original signature repeated
    `__lowerCamelCase` — a SyntaxError — and dropped the name save/restore.)
    """
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def snake_case__ ( node_proto, name, new_name ):
    """Replace every input of *node_proto* equal to *name* with *new_name*,
    recursing into the subgraphs of If/Loop nodes.

    (The original signature repeated `__lowerCamelCase` — a SyntaxError — and
    passed undefined `a__` names; restored below.)
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert-then-pop swaps the entry in place while keeping list length.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def snake_case__ ( graph_proto, name, new_name ):
    """Apply the node-level input rename to every node of *graph_proto*.

    (The original signature repeated `__lowerCamelCase` — a SyntaxError — and
    forwarded undefined `a__` names; restored below.)
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def snake_case__ ( model, model_without_ext, ind_to_replace ):
    """Remove duplicate initializers (indices (i, ref_i) in *ind_to_replace*) from
    *model_without_ext* and rewire graph inputs from the removed name to the kept one.

    (The original signature repeated `__lowerCamelCase` — a SyntaxError — and
    dropped the name bindings; restored below.)
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def snake_case__ ( model_file_path ):
    """Load an ONNX model, deduplicate equal initializer tensors, save the result
    as ``optimized_<name>`` next to the input, and return the new file path.

    NOTE(review): the original body added undefined `a__` names to `dup_set` and
    passed them to the rewrite helper; the evident intended bindings (loop
    indices i/j and (model, model, ind_to_replace)) are restored.
    """
    model_file_folder = os.path.dirname(model_file_path)
    model_file_name = os.path.basename(model_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()           # indices already known to be duplicates
    dup_map = {}              # kept name -> list of duplicate names
    ind_to_replace = []       # (duplicate index, kept index) pairs
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Element sizes per ONNX TensorProto.DataType:
                # 1 = FLOAT, 6 = INT32 (4 bytes); 7 = INT64, 11 = DOUBLE (8 bytes).
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, new_model_name)
    onnx.save(model, new_model_path)
    return new_model_path
| 714 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def snake_case__ ( sequence: list ) -> list:
    """Sort *sequence* (containing only the values in ``colors``) in place using
    the Dutch national flag (three-way partition) algorithm and return it.

    Raises ValueError if an element outside ``colors`` is encountered.
    (The original module bound all three color constants to the same throwaway
    name and dropped the swap targets; both are restored here.)
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contains only {colors} values'
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `user_input` and `unsorted` were never bound in the original
    # (results went to throwaway `_lowercase` locals); restored below.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    # NOTE(review): `dutch_national_flag_sort` is not defined under that name in
    # this file (the sorter above is named `snake_case__`); confirm the callee.
    print(f'{dutch_national_flag_sort(unsorted)}')
| 625 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __SCREAMING_SNAKE_CASE ( IterableDataset ):
    """Iterable dataset that concatenates tokenized examples (separated by the
    tokenizer's BOS token) and yields fixed-length ``seq_length`` token chunks.

    NOTE(review): the original base class `UpperCamelCase__` was undefined;
    `IterableDataset` (imported at the top of the file) is the evident intent.
    The `self.*` attribute bindings below were also lost to throwaway locals.
    """

    def __init__( self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        # BOS token id used to separate concatenated examples.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Number of raw characters to buffer before each tokenization pass.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            # NOTE(review): the original passed an undefined name for `truncation`;
            # False assumed so full examples are packed — confirm.
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def snake_case__ ( args ):
    """Build the streaming evaluation DataLoader described by *args*.

    NOTE(review): relies on the module-level `tokenizer` global defined below,
    and on `ConstantLengthDataset` — the dataset class above is named
    `__SCREAMING_SNAKE_CASE` in this file; confirm the intended bindings.
    """
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def snake_case__ ( args ):
    """Run the evaluation loop and return ``(mean loss, perplexity)``.

    NOTE(review): relies on the module-level `model`, `eval_dataloader` and
    `accelerator` globals set up below; the original body also dropped the
    `losses`/`loss`/`outputs` bindings, restored here.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat the scalar loss so gather() yields one value per sample.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
# NOTE(review): all of the bindings below were lost to throwaway `_lowercase`
# locals in the original while later lines read `args`, `logger`, `model`,
# `tokenizer` and `eval_dataloader`; the evident intended names are restored.
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
# NOTE(review): `create_dataloader` and `evaluate` are defined as `snake_case__`
# above in this file; confirm the intended callees.
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
| 715 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def snake_case ( self : List[str] )-> str:
lowerCamelCase__ : Dict =32
lowerCamelCase__ : Optional[Any] =embedder_hidden_size
# image encoding components
lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
lowerCamelCase__ : Dict =UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =AutoencoderKL()
lowerCamelCase__ : int ={
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
if str(lowerCamelCase ).startswith('''mps''' ):
lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
lowerCamelCase__ : int =input_image * 0.5 + 0.5
lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def snake_case ( self : List[str] )-> Optional[Any]:
lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
inputs.update({'''image_embeds''': None} )
lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def snake_case ( self : int )-> Optional[Any]:
lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def snake_case ( self : List[str] )-> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : List[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Tuple:
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : Tuple =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> List[str]:
        """Slow integration test: peak GPU memory of the offloaded pipeline stays under 7 GB.

        NOTE(review): as elsewhere in this class, the ``lowerCamelCase__`` targets never
        bind ``pipe``/``mem_bytes`` that the later lines read, and ``lowerCamelCase`` is
        passed where a device / image is expected — verify against the upstream test.
        """
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        # Reset CUDA memory statistics so max_memory_allocated() below measures only this run.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # Memory savers are the point of this test: slicing + sequential CPU offload.
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 0 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for a Lambda-CDM universe.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + Ol),
    where the curvature density Ok is inferred from the requirement that
    all densities sum to one.

    The original signature repeated the same parameter name (a SyntaxError)
    while the body read ``redshift``/``radiation_density``/... — the names
    the demo call in ``__main__`` uses are restored here.

    :param hubble_constant: H0, the present-day Hubble constant.
    :param radiation_density: relative radiation density (0..1).
    :param matter_density: relative matter density (0..1).
    :param dark_energy: relative dark-energy density (0..1).
    :param redshift: redshift z (>= 0).
    :raises ValueError: if any parameter is negative, or a relative density
        exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # Curvature density closes the sum of all densities to one.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # demo LCDM approximation: flat universe, 30% matter, remainder dark energy.
    # The demo call below reads `matter_density`, but the obfuscated script bound
    # the value to `_lowercase` (NameError) — restore the name it actually uses.
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=6_8.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 716 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed ``n``.

    Project Euler problem 2.  The original body appended the *limit* to the
    accumulator and then summed the integer parameter — it now collects each
    even Fibonacci value ``b`` and sums the list, and is named ``solution``
    as the ``__main__`` guard below expects.

    :param n: inclusive upper bound for the Fibonacci values considered.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
    # Print the Project Euler #2 answer for the default 4,000,000 limit.
    print(f'{solution() = }')
| 625 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """Simulated hanging connection: plain requests raise the simulator's
    RequestWouldHangIndefinitelyError, while a request with a timeout raises
    ConnectTimeout.

    The obfuscated version passed the undefined name ``__lowerCamelCase`` to
    ``pytest.raises`` — ``RequestWouldHangIndefinitelyError`` is imported at the
    top of the file and was otherwise unused, so it is restored here.  The
    function is also given a unique ``test_``-prefixed name: all three functions
    in this file shared one name, so the earlier ones were shadowed and pytest
    collected none of them.
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """Simulated failing connection: any request raises ConnectionError.

    Renamed from the shared obfuscated name ``snake_case__`` (which was shadowed
    by the next definition and not pytest-collectable) to a unique ``test_`` name.
    """
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1 mode: ``http_head`` refuses to go online.

    The obfuscated version passed the undefined name ``__lowerCamelCase`` to
    ``pytest.raises``; in this mode datasets raises ``OfflineModeIsEnabled``,
    a ``ConnectionError`` subclass, so the builtin is asserted here.  Renamed
    to a unique ``test_``-prefixed name (see the sibling tests).
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Model tester for TF BlenderbotSmall: builds tiny configs/inputs and checks
    decoder past-key-value caching.

    NOTE(review): obfuscation damage — the three ``_a`` class attributes shadow
    each other (presumably ``config_cls``/``config_updates``/``hidden_act``),
    ``__init__`` repeats the parameter name ``lowerCamelCase`` (a SyntaxError),
    and the ``lowerCamelCase__`` assignment targets never bind the attribute
    names the methods later read.  Left byte-identical; restore from the
    upstream transformers test file.
    """
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'
    # NOTE(review): duplicated parameter names below are a SyntaxError as written.
    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id
    # Build a tiny config plus a matching inputs dict (input_ids end with EOS).
    def snake_case ( self : Any )-> Any:
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict
    # Check that decoding with cached past_key_values matches decoding from scratch.
    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard BlenderbotSmall inputs dict, filling in default masks.

    The obfuscated signature repeated one parameter name (a SyntaxError) and the
    assignments never bound the names the return dict reads; the parameter names
    are restored to match the call site in the model tester and the flax twin of
    this helper at the top of the repo's test suite.

    Defaults: attention masks are 1 where tokens differ from ``config.pad_token_id``
    (the decoder mask always attends to its first position), and all head masks
    default to all-ones with shape (num_layers, num_heads).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common TF model/pipeline test suite wiring for BlenderbotSmall.

    NOTE(review): the repeated ``_a`` class attributes shadow each other —
    upstream these are distinct names (``all_model_classes``,
    ``all_generative_model_classes``, ``pipeline_model_mapping``,
    ``is_encoder_decoder``, ``test_pruning``, ``test_onnx``); only the last
    assignment survives as written.  The base classes are also obfuscated to
    ``lowerCAmelCase_``.  Left byte-identical.
    """
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _a = True
    _a = False
    _a = False
    # Set up the shared model tester and a ConfigTester for common config checks.
    def snake_case ( self : Any )-> str:
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )
    def snake_case ( self : Any )-> Optional[int]:
        self.config_tester.run_common_tests()
    # Exercise the past-key-values check defined on the model tester.
    def snake_case ( self : int )-> str:
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: generate a reply with the 90M BlenderbotSmall checkpoint.

    NOTE(review): the ``_a`` attributes shadow each other (upstream:
    ``src_text`` and ``model_name``), and properties reference
    ``self.src_text``/``self.model_name`` that the obfuscated attributes no
    longer define.  Left byte-identical.
    """
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'
    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def snake_case ( self : int )-> List[Any]:
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        # Beam search output is not pinned to one string; any known-good reply passes.
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
# Module-wide logger, following the repo-wide `logging.get_logger(__name__)` convention.
# (Was bound to `_lowercase`, which the very next statement immediately clobbered,
# and carried a bogus `: int` annotation.)
logger = logging.get_logger(__name__)
# File names expected inside a pretrained tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URL of the vocab file for each published RoFormer checkpoint.
# All four constants below were bound to the single name `_lowercase`, each
# assignment clobbering the previous, while the tokenizer class attributes
# reference VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION —
# the canonical names are restored here.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

# Maximum model input length (positional-embedding size) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

# Tokenizer-constructor defaults per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Fast RoFormer tokenizer backed by a `tokenizers` tokenizer with a custom
    Jieba pre-tokenizer; the pre-tokenizer is swapped for a picklable
    BertPreTokenizer around (de)serialization and `save_pretrained`.

    NOTE(review): obfuscation damage — `__init__` repeats the parameter name
    `lowerCamelCase` (a SyntaxError as written), its body passes the undefined
    name `__A` everywhere, the `_a` attributes shadow each other (upstream:
    vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes,
    pretrained_init_configuration, slow_tokenizer_class), and several methods
    read names (`pre_tok_state`, `state`, `output`, `files`, ...) that the
    `lowerCamelCase__` targets never bind.  Left byte-identical; restore from
    the upstream `RoFormerTokenizerFast`.
    """
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = PRETRAINED_INIT_CONFIGURATION
    _a = RoFormerTokenizer
    def __init__( self : Any, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Dict=None, lowerCamelCase : Dict=True, lowerCamelCase : Union[str, Any]="[UNK]", lowerCamelCase : List[Any]="[SEP]", lowerCamelCase : List[str]="[PAD]", lowerCamelCase : List[str]="[CLS]", lowerCamelCase : List[str]="[MASK]", lowerCamelCase : int=True, lowerCamelCase : List[Any]=None, **lowerCamelCase : int, )-> int:
        super().__init__(
            __A, tokenizer_file=__A, do_lower_case=__A, unk_token=__A, sep_token=__A, pad_token=__A, cls_token=__A, mask_token=__A, tokenize_chinese_chars=__A, strip_accents=__A, **__A, )
        # Re-build the backend normalizer if its lowercase/strip_accents settings
        # disagree with the requested ones.
        lowerCamelCase__ : Any =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get('''lowercase''', __A ) != do_lower_case
            or pre_tok_state.get('''strip_accents''', __A ) != strip_accents
        ):
            lowerCamelCase__ : List[str] =getattr(__A, pre_tok_state.pop('''type''' ) )
            lowerCamelCase__ : Optional[Any] =do_lower_case
            lowerCamelCase__ : Dict =strip_accents
            lowerCamelCase__ : List[str] =pre_tok_class(**__A )
        lowerCamelCase__ : int =do_lower_case
    # Pickling: the custom Jieba pre-tokenizer is not picklable, so swap in a
    # plain BertPreTokenizer for serialization.
    def __getstate__( self : Union[str, Any] )-> str:
        lowerCamelCase__ : int =self.__dict__.copy()
        lowerCamelCase__ : List[Any] =BertPreTokenizer()
        return state
    def __setstate__( self : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
        lowerCamelCase__ : Any =d
        lowerCamelCase__ : Optional[Any] =self.__dict__['''_tokenizer'''].get_vocab()
        lowerCamelCase__ : List[str] =PreTokenizer.custom(JiebaPreTokenizer(__A ) )
    # build_inputs_with_special_tokens: [CLS] A [SEP] (+ B [SEP]).
    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : str=None )-> int:
        lowerCamelCase__ : Optional[int] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    # create_token_type_ids_from_sequences: zeros for segment A, ones for segment B.
    def snake_case ( self : Tuple, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
        lowerCamelCase__ : Optional[int] =[self.sep_token_id]
        lowerCamelCase__ : Dict =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    # save_vocabulary: delegate to the backend tokenizer model.
    def snake_case ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
        lowerCamelCase__ : int =self._tokenizer.model.save(__A, name=__A )
        return tuple(__A )
    # save_pretrained: temporarily restore the serializable pre-tokenizer.
    def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : List[Any]=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : List[str]=False, **lowerCamelCase : Optional[Any], )-> Dict:
        lowerCamelCase__ : Dict =BertPreTokenizer()
        return super().save_pretrained(__A, __A, __A, __A, **__A )
| 718 |
"""simple docstring"""
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    """Return True if ``next_ver`` can extend the partial Hamiltonian path.

    The obfuscated signature repeated one parameter name (a SyntaxError) while
    the body read ``graph``/``path``/``curr_ind``/``next_ver`` — the names used
    by the call site in ``util_hamilton_cycle`` are restored.

    :param graph: adjacency matrix (1 = edge present).
    :param next_ver: candidate vertex for position ``curr_ind``.
    :param curr_ind: index in ``path`` being filled.
    :param path: partial path; unfilled slots hold -1.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: fill ``path`` in place from index ``curr_ind`` on.

    Returns True when ``path`` has been completed into a Hamiltonian cycle.
    (Parameter names restored: the obfuscated signature repeated one name —
    a SyntaxError — while the body read ``graph``/``path``/``curr_ind``.)
    """
    # Base Case: all vertices placed; cycle closes iff last connects to first.
    if curr_ind == len(graph):
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step: try every vertex as the next hop.
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle through ``graph`` starting and ending at
    ``start_index``, or an empty list when none exists.

    (Restored names: the obfuscated version repeated a parameter name and
    bound ``path`` to throwaway targets, so ``util_hamilton_cycle`` received
    undefined names.)
    """
    # len(graph) vertices plus a repeat of the start vertex to close the cycle.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffolding for the GPT-SW3 tokenizer package.
# Fixes restored here: the export map was bound to `_lowercase` while
# `_LazyModule` below reads `_import_structure`; the optional export was a bare
# list instead of a map entry; the TYPE_CHECKING import pointed at the mangled
# module `tokenization_gpt_swa`, contradicting the exported name
# "GPTSw3Tokenizer"; and the _LazyModule instance must replace this module in
# sys.modules rather than be bound to a throwaway name.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose nothing; importing the tokenizer will fail lazily.
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture.
# NOTE(review): bound to `_lowercase` here; presumably this was `SAMPLE_VOCAB`
# upstream and is what setUp() feeds to MBartTokenizer — confirm before use.
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# Fairseq language-code token ids of the MBart tokenizer (vocab offset + language
# index).  Both constants were bound to `_lowercase` (the second clobbering the
# first) while the test classes below read EN_CODE and RO_CODE — restored.
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Tokenization tests for MBart (slow + fast) against the SentencePiece fixture.

    NOTE(review): obfuscation damage — the ``_a`` attributes shadow each other
    (upstream: ``tokenizer_class``, ``rust_tokenizer_class``, ``test_rust_tokenizer``,
    ``test_sentencepiece``), ``lowerCamelCase`` is passed where the fixture path
    and keyword flags belong, and assignment targets never bind the names
    (``tokenizer``, ``tokens``, ``ids``, ...) later lines read.  Left byte-identical.
    """
    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True
    # setUp: build a slow tokenizer from the fixture and save it for reuse.
    def snake_case ( self : Tuple )-> Union[str, Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    # Full tokenize / convert_tokens_to_ids / convert_ids_to_tokens round trip,
    # including unknown-character handling ("9" and "é" map to <unk>).
    def snake_case ( self : Dict )-> Union[str, Any]:
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )
    # Save/reload parity between the fast (rust) and slow (python) tokenizers,
    # in default, legacy_format=True, and legacy_format=False modes.
    def snake_case ( self : Tuple )-> List[Any]:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the real facebook/mbart-large-en-ro checkpoint:
    language-code handling, batching, truncation, and seq2seq target encoding.

    NOTE(review): obfuscation damage — the ``_a`` attributes shadow each other
    (upstream: ``checkpoint_name``, ``src_text``, ``tgt_text``,
    ``expected_src_tokens``), and ``lowerCamelCase`` appears where booleans /
    expected values belong.  Left byte-identical.
    """
    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    # One shared tokenizer for the whole class (slow to download/load).
    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls
    # Language-code vocabulary ids are stable.
    def snake_case ( self : Optional[Any] )-> List[str]:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
    # Decoding with skip_special_tokens drops the language code and EOS.
    def snake_case ( self : Optional[Any] )-> str:
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
    # Truncation keeps EOS + language code as the final two tokens.
    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
    def snake_case ( self : int )-> Any:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
    # Special-token mapping survives a save/reload round trip.
    def snake_case ( self : Tuple )-> Optional[Any]:
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
    # Batch layout matches the fairseq reference (see gist link below).
    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    # Source and target can be truncated to different max lengths.
    def snake_case ( self : List[Any] )-> Dict:
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    # Translation inputs pin the forced BOS to the target language code.
    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Return {"started_at", "completed_at", "duration"} for one workflow job.

    ``duration`` is the wall-clock time in whole minutes.  The obfuscated
    version bound every value to ``lowerCamelCase__`` instead of filling
    ``job_info`` (which the ``__main__`` block reads via ``item[1]["duration"]``),
    and the function is named as the call site in ``get_job_time`` expects.

    :param job: one entry of the GitHub Actions jobs API response, with ISO-8601
        ``started_at`` / ``completed_at`` timestamps.
    """
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def snake_case__ ( workflow_run_id, token=None ):
    """Return ``{job name: timing dict}`` for every job of a workflow run.

    Fixed from the obfuscated original, which declared two parameters with the
    same name (a SyntaxError) and read several undefined names.

    Args:
        workflow_run_id: GitHub Actions workflow run id.
        token: optional GitHub API token for authenticated requests.

    Returns:
        Mapping of job name to the per-job timing dict; empty dict on error.
    """

    def _job_info(job):
        # Local copy of the single-job extractor: the module-level helper's
        # name is shadowed by later definitions in this file.
        start = job["started_at"]
        end = job["completed_at"]
        start_dt = date_parser.parse(start)
        end_dt = date_parser.parse(end)
        return {
            "started_at": start,
            "completed_at": end,
            "duration": round((end_dt - start_dt).total_seconds() / 60.0),
        }

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: _job_info(job) for job in result["jobs"]})
        # The first request returned up to 100 jobs; page through the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: _job_info(job) for job in result["jobs"]})
        return job_time
    except Exception:
        # Best-effort: report and return an empty mapping rather than crash.
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    # Fixed: the original called the undefined name `get_job_time`; the
    # fetcher in this module is `snake_case__`.
    job_time = snake_case__(args.workflow_run_id)
    # Fixed: the sorted dict was previously assigned to a throwaway name
    # while the loop iterated the unsorted mapping.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 720 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
    """Reverse every word of the sentence that is longer than four characters.

    >>> snake_case__("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # Bug fix: the length test must apply to each word, not to the whole
    # sentence (the original checked len(<sentence>) > 4, reversing every
    # word of any non-trivial input).
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in __lowerCamelCase.split()
    )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed: call the function actually defined in this module; the original
    # referenced the undefined name `reverse_long_words`.
    print(snake_case__("Hey wollef sroirraw"))
| 625 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
_lowercase : Optional[int] = logging.get_logger(__name__)

# Canonical checkpoint name -> hub URL of its config.json.
_lowercase : Dict = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class __SCREAMING_SNAKE_CASE ( BackboneConfigMixin, PretrainedConfig ):
    """Configuration class for a Swin Transformer model.

    Fixed from the obfuscated original, which was not valid Python: it
    inherited from its own not-yet-defined name, declared every ``__init__``
    parameter with the same name (a SyntaxError), and read the undefined name
    ``_a``.  The bases come from this file's imports; parameter names follow
    the attribute assignments in the body.
    """

    model_type = "swin"

    # PretrainedConfig attribute aliases: external names -> stored names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_a = version.parse('1.11' )
@property
def snake_case ( self : Union[str, Any] )-> str:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case ( self : Optional[int] )-> int:
return 1E-4
| 721 |
"""simple docstring"""
def snake_case__ ( max_base: int = 10, max_power: int = 22 ):
    """Project Euler 63: count n-digit positive integers that are n-th powers.

    Fixed from the obfuscated original, which declared both parameters with
    the same name (a SyntaxError) and read the undefined names ``powers`` and
    ``bases``.

    Args:
        max_base: exclusive upper bound on the bases tried (1..max_base-1).
        max_power: exclusive upper bound on the exponents tried (1..max_power-1).

    Returns:
        Number of (base, power) pairs with ``len(str(base**power)) == power``.

    >>> snake_case__(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
    # Fixed: the module's function is `snake_case__`; `solution` was undefined.
    print(f"{snake_case__(10, 22) = }")
| 625 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration test for the DiT document-image classifier.

    Fixed from the obfuscated original, which referenced the undefined name
    ``UpperCamelCase__`` and reassigned a single local name for every value;
    references are restored from the surrounding imports (``torch_device``)
    and the method's own data flow.
    """

    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes -> (batch, num_labels) = (1, 16).
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4))
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if isinstance(__lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
    """Shared test mixin for Flax VisionTextDualEncoder model combinations.

    Concrete test classes provide the three hook methods at the top
    (pretrained model + inputs, vision/text sub-models, config-and-inputs)
    and inherit the ``check_*`` / ``test_*`` machinery below.

    NOTE(review): this block is obfuscated — several methods declare multiple
    parameters with the same name ``lowerCamelCase`` (a SyntaxError as
    written), and locals are all collapsed onto one mangled name, so many
    later reads (`vision_model`, `text_model`, `diff`, `out_a`, ...) are
    undefined.  Comments below describe the evident intent; code is left
    byte-identical.
    """

    # Hook: build (vision_model, text_model) from the two configs — overridden by subclasses.
    def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
        pass

    # Hook: prepare config + model inputs — overridden by subclasses.
    def snake_case ( self : List[str] )-> List[str]:
        pass

    # Hook: load a pretrained model and matching inputs — overridden by subclasses.
    def snake_case ( self : Optional[Any] )-> str:
        pass

    # Assert two arrays agree elementwise within `tol` (max-abs difference).
    # NOTE(review): `a`, `b`, `diff`, `tol` are mangled/undefined as written.
    def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
        lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
        self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )

    # Build the dual-encoder from a merged config and check embedding shapes.
    def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
        lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )

    # Build the dual-encoder from two pretrained sub-models and check shapes.
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )

    # Round-trip save_pretrained/from_pretrained and compare outputs (<= 1e-3).
    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
            lowerCamelCase__ : List[str] =after_output[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-3 )

    # Run with output_attentions and check attention shapes on both towers.
    def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[str] =model(
            input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
        lowerCamelCase__ : int =output.vision_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
        lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ : int =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
        lowerCamelCase__ : List[Any] =output.text_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    # Cross-framework check: PT and Flax models must agree (<= 4e-2) directly
    # and after converting weights in both directions via temp checkpoints.
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
        pt_model.to(lowerCamelCase )
        pt_model.eval()
        # prepare inputs
        lowerCamelCase__ : Any =inputs_dict
        lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
            lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
            # PT -> Flax
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
            lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
            # Flax -> PT
            with tempfile.TemporaryDirectory() as tmpdirname:
                fx_model.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
            pt_model_loaded.to(lowerCamelCase )
            pt_model_loaded.eval()
            with torch.no_grad():
                lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )

    # PT -> Flax: convert a fresh PT state dict and run the equivalence check.
    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
        lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
        lowerCamelCase__ : Tuple =fx_state
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # Flax -> PT: load Flax params into a PT model and run the equivalence check.
    def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
        lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # test: model built from merged configs produces correctly shaped embeddings.
    def snake_case ( self : Optional[int] )-> Union[str, Any]:
        lowerCamelCase__ : Any =self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCamelCase )

    # test: model built from two pretrained towers produces correct embeddings.
    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )

    # test: save/load round-trip preserves outputs.
    def snake_case ( self : Tuple )-> Any:
        lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
        self.check_save_load(**lowerCamelCase )

    # test: attention outputs have the expected shapes.
    def snake_case ( self : str )-> Any:
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCamelCase )

    # test: PT <-> Flax weight conversion preserves outputs in both directions.
    @is_pt_flax_cross_test
    def snake_case ( self : Tuple )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
        lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
        lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
        lowerCamelCase__ : Tuple =config_inputs_dict
        self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # slow test: real pretrained checkpoint survives a save/load round-trip (<= 1e-5).
    @slow
    def snake_case ( self : Optional[Any] )-> Tuple:
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
        lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[str] =outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
            lowerCamelCase__ : List[Any] =after_outputs[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE, unittest.TestCase ):
    """ViT + BERT instantiation of the dual-encoder test mixin.

    Fixed from the obfuscated original: the base class name ``lowerCAmelCase_``
    was undefined (the mixin defined above is intended), the hook methods were
    all named ``snake_case`` although the mixin calls them as
    ``get_pretrained_model_and_inputs`` / ``get_vision_text_model`` /
    ``prepare_config_and_inputs``, and the config/input unpacking assigned
    every value to one mangled name.
    """

    def get_pretrained_model_and_inputs(self):
        # Tiny random checkpoints keep this fast; weights come from PT.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        # Build the two towers the dual encoder is assembled from.
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE, unittest.TestCase ):
    """CLIP-vision + BERT instantiation of the dual-encoder test mixin.

    Fixed from the obfuscated original: the undefined base ``lowerCAmelCase_``
    is replaced by the mixin defined above, the hooks carry the names the
    mixin actually calls, and the config/input unpacking is restored.
    """

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration test against the real clip-italian dual-encoder checkpoint.

    Fixed from the obfuscated original, which passed the undefined name
    ``lowerCamelCase`` as keyword values and reused one local for every value.
    """

    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2_284_727, 0.3_104_122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1E-3))
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( v, l, r, key ):  # noqa: E741
    """Binary search: smallest index i in (l, r] with v[i] >= key.

    Fixed from the obfuscated original, which declared all four parameters
    with the same name (a SyntaxError) while the body read ``v``/``l``/``r``/
    ``key``.

    Precondition: v[l] < key <= v[r]; l may be -1 as a virtual -infinity.
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def snake_case__ ( __lowerCamelCase ):
    """Length of the longest strictly increasing subsequence, in O(n log n).

    Fixed from the obfuscated original, where the writes into the tails array
    were collapsed onto a throwaway local and the binary-search step was lost.

    >>> snake_case__([10, 22, 9, 33, 21, 50, 41, 60, 80])
    6
    """
    v = __lowerCamelCase
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts the best 1-element subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the ceiling of v[i] among the tails.  The binary search
            # is inlined here because the module-level helper's name is
            # shadowed by later definitions in this file.
            lo, hi = -1, length - 1
            while hi - lo > 1:
                mid = (lo + hi) // 2
                if tail[mid] >= v[i]:
                    hi = mid
                else:
                    lo = mid
            tail[hi] = v[i]
    return length
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed as a script.
    doctest.testmod()
| 701 |
"""simple docstring"""
def snake_case__ ( weights: list, values: list, number_of_items: int, max_weight: int, index: int ):
    """Naive 0/1 knapsack by exhaustive recursion.

    Fixed from the obfuscated original, which declared all five parameters
    with the same name (a SyntaxError) and recursed through the undefined
    name ``knapsack``.

    Args:
        weights: weight of each item.
        values: value of each item.
        number_of_items: total number of items (== len(weights)).
        max_weight: knapsack capacity.
        index: first item still under consideration.

    Returns:
        Best achievable total value using items index..number_of_items-1.
    """

    def _best(capacity, idx):
        # Recursion goes through a nested helper because the module-level
        # function name is shadowed by later definitions in this file.
        if idx == number_of_items:
            return 0
        # Option 1: skip the current item.
        skip = _best(capacity, idx + 1)
        # Option 2: take it, if it still fits.
        take = 0
        if weights[idx] <= capacity:
            take = values[idx] + _best(capacity - weights[idx], idx + 1)
        return max(skip, take)

    return _best(max_weight, index)
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed as a script.
    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
from PIL import Image
def snake_case__ ( img: "Image", level: int ):
    """Return a copy of *img* with its contrast changed by *level*.

    Fixed from the obfuscated original, which declared both parameters with
    the same name (a SyntaxError) and passed the undefined name
    ``SCREAMING_SNAKE_CASE_`` to ``img.point``.

    Args:
        img: a PIL Image (any object with a ``point(fn)`` method works).
        level: contrast adjustment, roughly in [-255, 255]; 0 is a no-op.
    """
    # Standard contrast-correction factor; level 0 yields factor 1.0.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Scale each channel around mid-gray (128), which stays fixed.
        return int(128 + factor * (c - 128))

    # Apply the per-channel mapping over the whole image.
    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170 and save the result next to the original.
        # Fixed: the original called the undefined name `change_contrast` and
        # discarded the result while reading the undefined `cont_img`.
        cont_img = snake_case__(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 702 |
"""simple docstring"""
# Pinned/minimum version specifiers for the library's dependencies:
# maps pip package name -> pip requirement string.
# NOTE(review): tables like this are typically auto-generated from setup.py's
# deps list — if that applies here, edit the generator, not this file.
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def snake_case__ ( num ):
    """Sieve of Eratosthenes: return the primes up to and including *num*.

    Fixed from the obfuscated original, which read the undefined names
    ``num`` / ``SCREAMING_SNAKE_CASE_`` and lost the ``sieve[i] = False``
    writes behind a throwaway local.

    Args:
        num: upper bound (inclusive); must be a positive integer.

    Raises:
        ValueError: if num <= 0.
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start]:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                sieve[i] = False
        start += 1

    # Everything above sqrt(num) still marked True is prime.
    for j in range(end + 1, num + 1):
        if sieve[j]:
            prime.append(j)

    return prime
if __name__ == "__main__":
    # Fixed: the module's sieve function is `snake_case__`; `prime_sieve`
    # was an undefined name.
    print(snake_case__(int(input("Enter a positive integer: ").strip())))
| 703 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase: list[int] ):
    """Maximum product over all contiguous subarrays of integers.

    Fixed from the obfuscated original, which read the undefined name
    ``numbers`` and collapsed the running max/min updates onto throwaway
    locals.

    Args:
        __lowerCamelCase: list/tuple of integers; empty input returns 0.

    Raises:
        ValueError: if the input is not a list/tuple of integers.
    """
    numbers = __lowerCamelCase
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # A negative factor swaps the roles of the running max and min.
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 625 | 0 |
"""simple docstring"""
from random import randint, random
def snake_case__ ( number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5, ):
    """Build the initial highway: one row of cells, -1 = empty, >=0 = car speed.

    Fixed from the obfuscated original, which declared all six parameters
    with the same name (a SyntaxError) and lost the ``highway[0][i]`` writes.

    Args:
        number_of_cells: length of the (circular) highway.
        frequency: fixed spacing between cars (must be >= 1 unless
            random_frequency is True, otherwise the loop never advances).
        initial_speed: speed given to every car (clamped to >= 0).
        random_frequency: space cars randomly instead of every `frequency`.
        random_speed: give each car a random speed in [0, max_speed].
        max_speed: upper bound used for the random choices.

    >>> snake_case__(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        # Place the cars
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed
        # Arbitrary spacing, may need tuning
        i += randint(1, max_speed * 2) if random_frequency else frequency
    return highway
def snake_case__ ( highway_now: list, car_index: int ):
    """Count empty cells between car_index and the next car ahead (wrapping).

    Fixed from the obfuscated original, which read undefined names for its
    parameters.  The wrap-around is iterated instead of recursing (the
    module-level name is shadowed in this file, and the recursion never
    terminated on an empty highway).
    """
    distance = 0
    for cell in highway_now[car_index + 1 :]:
        if cell != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway: wrap around and keep
    # counting from cell 0.
    for cell in highway_now:
        if cell != -1:
            return distance
        distance += 1
    return distance
def snake_case__ ( highway_now: list, probability: float, max_speed: int ):
    """One Nagel-Schreckenberg speed update (positions unchanged).

    Each car accelerates by 1 (capped at max_speed), brakes so it cannot hit
    the next car, and with `probability` randomly slows down by 1.

    Fixed from the obfuscated original, which read undefined names and lost
    the ``next_highway[car_index]`` writes.
    """

    def _gap(cells, car_index):
        # Empty cells to the next car ahead, wrapping past the end.  Local
        # copy of the distance helper: module-level names are shadowed in
        # this file.
        distance = 0
        for cell in cells[car_index + 1 :]:
            if cell != -1:
                return distance
            distance += 1
        for cell in cells:
            if cell != -1:
                return distance
            distance += 1
        return distance

    number_of_cells = len(highway_now)
    # Before calculations, the next highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = _gap(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def snake_case__(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run ``number_of_update`` steps of the traffic simulation.

    ``highway`` is a history of states (lists of speeds, ``-1`` = empty cell);
    each step appends the next state and the grown history is returned.

    NOTE(review): this relies on a module-level ``update`` single-step function
    (presumably the one defined just above, whose name was mangled to
    ``snake_case__``) — confirm the name when de-mangling the module.

    Args:
        highway: list of highway states; ``highway[0]`` is the initial state.
        number_of_update: number of simulation steps to run.
        probability: random-slowdown probability forwarded to ``update``.
        max_speed: maximum speed forwarded to ``update``.

    Returns:
        The same ``highway`` list, extended with the new states.
    """
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        # Speeds after accelerate/brake/random-slowdown, still at old positions.
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop).
                index = (car_index + speed) % number_of_cells
                # Commit the change of position.
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 704 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''Output container returned by the Flax ControlNet forward pass.'''

    # NOTE(review): both fields are literally named `_a` (the second assignment
    # shadows the first) and `42` stands in where array-typed fields are
    # expected — this looks like mechanically rewritten code. Presumably the
    # fields are the down-block residual samples and the mid-block sample;
    # confirm against the original definition before relying on this class.
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    '''Embeds a raw conditioning image into the UNet feature space with a small
    stack of convolutions (stride-2 convs progressively halve the resolution).

    NOTE(review): this class looks mechanically rewritten and cannot work as
    written: the three `_a` fields shadow each other (from usage they were
    presumably `conditioning_embedding_channels`, `block_out_channels` and
    `dtype`), and every layer built in the setup method is bound to the
    throwaway local `lowerCamelCase__` instead of the `self.conv_in` /
    `self.blocks` / `self.conv_out` attributes that `__call__` reads.
    '''
    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa
    def snake_case ( self : Tuple )-> int:
        # Flax `setup`-style initialisation (NOTE(review): flax only invokes a
        # method literally named `setup`; this mangled name would never run).
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            # One stride-1 conv keeping the resolution ...
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            # ... followed by a stride-2 conv halving the spatial size.
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        # Final projection is zero-initialised so the ControlNet starts as a no-op.
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        # Pipeline: conv_in -> silu -> (conv, silu)* -> zero-init conv_out.
        # NOTE(review): intermediate results are bound to throwaway locals and
        # `embedding` is never assigned — another artefact of the rewrite.
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    '''Flax ControlNet: a UNet-encoder clone that consumes a conditioning image
    and produces per-resolution residuals plus a mid-block residual, which a
    pipeline adds into the matching blocks of the main UNet.

    NOTE(review): this class looks mechanically rewritten and cannot run as
    written. Concretely: (1) every `_a` field assignment shadows the previous
    one — from the method bodies the fields were presumably `sample_size`,
    `in_channels`, `down_block_types`, `only_cross_attention`,
    `block_out_channels`, `layers_per_block`, `attention_head_dim`,
    `num_attention_heads`, `cross_attention_dim`, `dropout`,
    `use_linear_projection`, `dtype`, `flip_sin_to_cos`, `freq_shift`,
    `controlnet_conditioning_channel_order` and
    `conditioning_embedding_out_channels`; (2) `jnp.floataa` / `jnp.intaa`
    appear where `jnp.float32` / `jnp.int32` are expected; (3) results are
    bound to the throwaway local `lowerCamelCase__` instead of the named
    locals / `self.*` attributes the rest of the code reads, and
    `lowerCamelCase` is passed where distinct arguments belong. The structure
    below is annotated as-is; restore names before use.
    '''
    _a = 3_2
    _a = 4
    _a = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _a = 2
    _a = 8
    _a = None
    _a = 1_2_8_0
    _a = 0.0
    _a = False
    _a = jnp.floataa
    _a = True
    _a = 0
    _a = "rgb"
    _a = (1_6, 3_2, 9_6, 2_5_6)
    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        # Initialise the module parameters from dummy zero tensors
        # (sample, timestep, encoder hidden states, conditioning image).
        # init input tensors
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution with 3 channels.
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
    def snake_case ( self : Any )-> Tuple:
        # Flax `setup`-style construction of all submodules.
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        # Broadcast per-model config scalars to per-down-block tuples.
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        # Zero-initialised 1x1 convs so the ControlNet residuals start as no-ops.
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )
            # One zero-init 1x1 conv per resnet layer of the down block.
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            if not is_final_block:
                # Extra conv matching the downsampler output of this block.
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks
        # mid
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        # Forward pass: (sample, timesteps, encoder_hidden_states,
        # controlnet_cond, conditioning_scale, return_dict, train).
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Convert a BGR conditioning image to the RGB order the net expects.
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
        # 1. time
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
        # 2. pre-process (flax convs expect NHWC, hence the transposes)
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
        # 5. contronet blocks
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Multi-GPU integration tests: each test launches a helper script through
    ``torchrun`` with one process per visible CUDA device.

    NOTE(review): all four test methods share the mangled name ``snake_case``
    (later defs shadow earlier ones). The names are kept here to preserve the
    interface, but they should be de-mangled to unique ``setUp``/``test_*``
    names for unittest to discover them.
    """

    def snake_case ( self : Optional[Any] )-> str:
        # setUp-style fixture: locate the helper scripts that ship with
        # `accelerate.test_utils` (previously bound to a throwaway local).
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )

    @require_multi_gpu
    def snake_case ( self : Any )-> List[str]:
        # Run the generic distributed smoke-test script on all devices.
        print(F'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def snake_case ( self : str )-> Optional[Any]:
        # Run the distributed-ops test script on all devices.
        print(F'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(F'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def snake_case ( self : int )-> List[Any]:
        # Re-run this very file under torchrun (exercises the __main__ block).
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def snake_case ( self : Union[str, Any] )-> Dict:
        # Run the data-loop test restricted to two devices.
        print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
if __name__ == "__main__":
    # Sanity-check `Accelerator.pad_across_processes` when run under torchrun:
    # each rank builds a tensor with a different first dimension (rank + 2) and
    # verifies the padded result's shape, contents and zero-padding.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    # Default padding: original values first, zeros appended at the end.
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # pad_first=True: zeros prepended, original values at the end.
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure (standard Hugging Face pattern): submodule contents are
# declared here and only imported when first accessed through the lazy module.
# (Previously the dict and the conditional additions were bound to a throwaway
# `_lowercase` name while the final `_LazyModule` call read an undefined
# `_import_structure` — restored here.)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
# Fast tokenizer only exists when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def snake_case__(seed: int):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducibility.

    Args:
        seed: the seed applied to every generator (CPU and all CUDA devices).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class __SCREAMING_SNAKE_CASE :
    '''Exponential moving average (EMA) of a set of ``torch.nn.Parameter``s,
    with optional warm-up schedule, (de)serialisation helpers and a
    store/restore mechanism for temporarily swapping in the averaged weights.

    NOTE(review): throughout this class the literal name ``snake_case__``
    appears where the original had an argument value (e.g. ``None``, ``False``,
    a message variable, or the parameter being processed). As written those
    calls pass a module-level function object instead — the placeholders must
    be restored before this class can behave correctly.
    '''
    def __init__( self : Any, lowerCamelCase : Iterable[torch.nn.Parameter], lowerCamelCase : float = 0.9_999, lowerCamelCase : float = 0.0, lowerCamelCase : int = 0, lowerCamelCase : bool = False, lowerCamelCase : Union[float, int] = 1.0, lowerCamelCase : Union[float, int] = 2 / 3, lowerCamelCase : Optional[Any] = None, lowerCamelCase : Dict[str, Any] = None, **lowerCamelCase : int, )-> List[Any]:
        # Accepts (parameters, decay, min_decay, update_after_step,
        # use_ema_warmup, inv_gamma, power, model_cls, model_config, **kwargs);
        # legacy call styles (nn.Module, max_value/min_value/device kwargs) are
        # deprecated and translated below.
        if isinstance(snake_case__, torch.nn.Module ):
            lowerCamelCase__ : Optional[Any] =(
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''', '''1.0.0''', snake_case__, standard_warn=snake_case__, )
            lowerCamelCase__ : Union[str, Any] =parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            lowerCamelCase__ : Optional[Any] =True
        if kwargs.get('''max_value''', snake_case__ ) is not None:
            lowerCamelCase__ : Optional[Any] ='''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate('''max_value''', '''1.0.0''', snake_case__, standard_warn=snake_case__ )
            lowerCamelCase__ : List[str] =kwargs['''max_value''']
        if kwargs.get('''min_value''', snake_case__ ) is not None:
            lowerCamelCase__ : str ='''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate('''min_value''', '''1.0.0''', snake_case__, standard_warn=snake_case__ )
            lowerCamelCase__ : int =kwargs['''min_value''']
        lowerCamelCase__ : List[Any] =list(snake_case__ )
        # Shadow parameters hold the running averages, detached from autograd.
        lowerCamelCase__ : Dict =[p.clone().detach() for p in parameters]
        if kwargs.get('''device''', snake_case__ ) is not None:
            lowerCamelCase__ : Optional[Any] ='''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate('''device''', '''1.0.0''', snake_case__, standard_warn=snake_case__ )
            self.to(device=kwargs['''device'''] )
        lowerCamelCase__ : List[str] =None
        lowerCamelCase__ : Tuple =decay
        lowerCamelCase__ : List[Any] =min_decay
        lowerCamelCase__ : Optional[int] =update_after_step
        lowerCamelCase__ : List[str] =use_ema_warmup
        lowerCamelCase__ : List[Any] =inv_gamma
        lowerCamelCase__ : List[str] =power
        lowerCamelCase__ : Optional[Any] =0
        lowerCamelCase__ : Dict =None # set in `step()`
        lowerCamelCase__ : Any =model_cls
        lowerCamelCase__ : Any =model_config
    @classmethod
    def snake_case ( cls : Dict, lowerCamelCase : List[Any], lowerCamelCase : Any )-> Union[str, Any]:
        # from_pretrained-style constructor: load a saved model and wrap its
        # parameters in a new EMA instance, restoring the EMA state dict.
        lowerCamelCase__ , lowerCamelCase__ : Dict =model_cls.load_config(snake_case__, return_unused_kwargs=snake_case__ )
        lowerCamelCase__ : Optional[int] =model_cls.from_pretrained(snake_case__ )
        lowerCamelCase__ : Dict =cls(model.parameters(), model_cls=snake_case__, model_config=model.config )
        ema_model.load_state_dict(snake_case__ )
        return ema_model
    def snake_case ( self : Optional[Any], lowerCamelCase : List[Any] )-> int:
        # save_pretrained-style helper: materialise a model from the stored
        # config, copy the EMA weights into it and save it to the given path.
        if self.model_cls is None:
            raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
        if self.model_config is None:
            raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
        lowerCamelCase__ : List[Any] =self.model_cls.from_config(self.model_config )
        lowerCamelCase__ : List[Any] =self.state_dict()
        state_dict.pop('''shadow_params''', snake_case__ )
        model.register_to_config(**snake_case__ )
        self.copy_to(model.parameters() )
        model.save_pretrained(snake_case__ )
    def snake_case ( self : List[str], lowerCamelCase : int )-> Optional[int]:
        # get_decay: compute the decay factor for a given optimisation step,
        # using either the warm-up schedule or the (1+s)/(10+s) ramp, clamped
        # to [min_decay, decay].
        lowerCamelCase__ : int =max(0, optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            lowerCamelCase__ : str =1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            lowerCamelCase__ : List[Any] =(1 + step) / (10 + step)
        lowerCamelCase__ : Any =min(snake_case__, self.decay )
        # make sure decay is not smaller than min_decay
        lowerCamelCase__ : Optional[Any] =max(snake_case__, self.min_decay )
        return cur_decay_value
    @torch.no_grad()
    def snake_case ( self : List[str], lowerCamelCase : Iterable[torch.nn.Parameter] )-> Dict:
        # step: update every shadow parameter towards the live parameters with
        # the current decay (DeepSpeed ZeRO-3 parameters are gathered first).
        if isinstance(snake_case__, torch.nn.Module ):
            lowerCamelCase__ : List[str] =(
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''', '''1.0.0''', snake_case__, standard_warn=snake_case__, )
            lowerCamelCase__ : Dict =parameters.parameters()
        lowerCamelCase__ : List[Any] =list(snake_case__ )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        lowerCamelCase__ : Optional[Any] =self.get_decay(self.optimization_step )
        lowerCamelCase__ : Any =decay
        lowerCamelCase__ : Tuple =1 - decay
        lowerCamelCase__ : Tuple =contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, snake_case__ ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
                lowerCamelCase__ : int =deepspeed.zero.GatheredParameters(snake_case__, modifier_rank=snake_case__ )
            with context_manager():
                if param.requires_grad:
                    # In-place EMA update: s -= (1 - decay) * (s - p).
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(snake_case__ )
    def snake_case ( self : List[Any], lowerCamelCase : Iterable[torch.nn.Parameter] )-> Dict:
        # copy_to: write the averaged weights back into the given parameters.
        lowerCamelCase__ : Any =list(snake_case__ )
        for s_param, param in zip(self.shadow_params, snake_case__ ):
            param.data.copy_(s_param.to(param.device ).data )
    def snake_case ( self : List[str], lowerCamelCase : int=None, lowerCamelCase : int=None )-> str:
        # to(device, dtype): move/cast the shadow parameters (dtype only
        # applied to floating-point tensors).
        lowerCamelCase__ : Dict =[
            p.to(device=snake_case__, dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ )
            for p in self.shadow_params
        ]
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # state_dict: serialisable snapshot of the EMA hyper-parameters and
        # shadow parameters.
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def snake_case ( self : Any, lowerCamelCase : Iterable[torch.nn.Parameter] )-> str:
        # store: keep a temporary CPU copy of the given parameters so they can
        # be restored after evaluating with the EMA weights.
        lowerCamelCase__ : Optional[Any] =[param.detach().cpu().clone() for param in parameters]
    def snake_case ( self : str, lowerCamelCase : Iterable[torch.nn.Parameter] )-> List[Any]:
        # restore: copy the previously store()d values back into parameters.
        if self.temp_stored_params is None:
            raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
        for c_param, param in zip(self.temp_stored_params, snake_case__ ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        lowerCamelCase__ : int =None
    def snake_case ( self : Union[str, Any], lowerCamelCase : dict )-> Optional[int]:
        # load_state_dict: validate and apply a snapshot produced by state_dict().
        lowerCamelCase__ : Dict =copy.deepcopy(snake_case__ )
        lowerCamelCase__ : List[str] =state_dict.get('''decay''', self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('''Decay must be between 0 and 1''' )
        lowerCamelCase__ : Dict =state_dict.get('''min_decay''', self.min_decay )
        if not isinstance(self.min_decay, snake_case__ ):
            raise ValueError('''Invalid min_decay''' )
        lowerCamelCase__ : Optional[Any] =state_dict.get('''optimization_step''', self.optimization_step )
        if not isinstance(self.optimization_step, snake_case__ ):
            raise ValueError('''Invalid optimization_step''' )
        lowerCamelCase__ : str =state_dict.get('''update_after_step''', self.update_after_step )
        if not isinstance(self.update_after_step, snake_case__ ):
            raise ValueError('''Invalid update_after_step''' )
        lowerCamelCase__ : Optional[int] =state_dict.get('''use_ema_warmup''', self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup, snake_case__ ):
            raise ValueError('''Invalid use_ema_warmup''' )
        lowerCamelCase__ : Tuple =state_dict.get('''inv_gamma''', self.inv_gamma )
        if not isinstance(self.inv_gamma, (float, int) ):
            raise ValueError('''Invalid inv_gamma''' )
        lowerCamelCase__ : Optional[int] =state_dict.get('''power''', self.power )
        if not isinstance(self.power, (float, int) ):
            raise ValueError('''Invalid power''' )
        lowerCamelCase__ : str =state_dict.get('''shadow_params''', snake_case__ )
        if shadow_params is not None:
            lowerCamelCase__ : Dict =shadow_params
            if not isinstance(self.shadow_params, snake_case__ ):
                raise ValueError('''shadow_params must be a list''' )
            if not all(isinstance(snake_case__, torch.Tensor ) for p in self.shadow_params ):
                raise ValueError('''shadow_params must all be Tensors''' )
| 706 |
"""simple docstring"""
import os
def snake_case__(path=None):
    """Project Euler 22: total of (rank * alphabetical name score).

    Reads a single line of comma-separated, double-quoted names, sorts them,
    scores each name as the sum of its letter positions (``'A'`` = 1) and
    weights that score by the name's 1-based rank in the sorted list.

    Args:
        path: optional path to the names file; defaults to
            ``p022_names.txt`` next to this module (backward compatible).

    Returns:
        The sum of all weighted name scores.
    """
    if path is None:
        path = os.path.join(os.path.dirname(__file__), "p022_names.txt")
    with open(path) as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    total_score = 0
    for i, name in enumerate(names):
        name_score = 0
        for letter in name:
            name_score += ord(letter) - 64  # 'A' is 65, so 'A' -> 1
        total_score += (i + 1) * name_score
    return total_score
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module — the function
    # above was mangled to `snake_case__` — so as written this raises
    # NameError; restore the intended name.
    print(solution())
| 625 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def snake_case__(env_keys, default):
    """Return the first non-negative integer found among environment variables.

    Args:
        env_keys: iterable of environment variable names, checked in order.
        default: value returned when none of the variables holds an int >= 0.

    Returns:
        The first variable's value parsed as int, or ``default``.
    """
    for e in env_keys:
        # Missing variables map to -1 so they fail the >= 0 test below.
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def _strtobool(value):
    """Local re-implementation of ``distutils.util.strtobool`` (the module was
    removed in Python 3.12). Returns 1/0 for truthy/falsy strings, raises
    ValueError otherwise."""
    v = value.lower()
    if v in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if v in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError(f"invalid truth value {value!r}")


def snake_case__(key, default=False):
    """Read a boolean flag from the environment.

    Args:
        key: environment variable name.
        default: value assumed when the variable is unset.

    Returns:
        True iff the variable (or default) parses as a truthy string.
    """
    value = os.environ.get(key, str(default))
    return _strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def snake_case__(key, default="no"):
    """Read a string value from the environment.

    Args:
        key: environment variable name.
        default: value used when the variable is unset (stringified).

    Returns:
        The environment value, or ``str(default)`` when missing.
    """
    value = os.environ.get(key, str(default))
    return value
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __SCREAMING_SNAKE_CASE :
    """A binary-tree node: an integer payload plus left/right child links.

    (The original bound ``value`` and the child links to a throwaway local
    instead of ``self``, leaving instances without any attributes.)
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None  # left child (Node | None)
        self.right = None  # right child (Node | None)
class __SCREAMING_SNAKE_CASE :
    """Single-value iterator that yields the sum of all node values in a
    binary tree, computed with a recursive depth-first search.

    (The original never assigned ``self.tree`` and called an undefined
    ``self.depth_first_search``; both are fixed here, and the original call
    name is kept as an alias.)
    """

    def __init__(self, tree) -> None:
        self.tree = tree  # root node of the tree to sum

    def snake_case(self, node) -> int:
        """Recursively sum ``node`` and both subtrees (empty subtree -> 0)."""
        if node is None:
            return 0
        return node.value + (
            self.snake_case(node.left) + self.snake_case(node.right)
        )

    # Backward-compatible alias for the name used by the original call sites.
    depth_first_search = snake_case

    def __iter__(self):
        yield self.snake_case(self.tree)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
def snake_case__(edge):
    """Surface area of a regular dodecahedron with side length ``edge``.

    Uses ``A = 3 * sqrt(25 + 10*sqrt(5)) * edge**2``.

    Raises:
        ValueError: if ``edge`` is not a positive int or float.
    """
    # Check the type first so a non-numeric edge raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def snake_case__(edge):
    """Volume of a regular dodecahedron with side length ``edge``.

    Uses ``V = (15 + 7*sqrt(5)) / 4 * edge**3``.

    Raises:
        ValueError: if ``edge`` is not a positive int or float.
    """
    # Check the type first so a non-numeric edge raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 708 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def snake_case__(model, dirpath):
    """Save ``model`` into ``dirpath`` via ``save_pretrained``.

    If the directory already exists, stale ``config.json`` /
    ``pytorch_model.bin`` files are removed first; otherwise the directory is
    created.

    Args:
        model: any object exposing a Hugging Face style ``save_pretrained``.
        dirpath: target directory path.
    """
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def snake_case__(p, unlogit=False):
    """Shannon entropy of ``p`` along the last dimension.

    Args:
        p: probability tensor (rows need not be re-normalised here).
        unlogit: if True, square ``p`` element-wise before computing entropy.

    Returns:
        Tensor of ``-sum(p * log(p))`` over the last dim, with ``0*log(0)``
        defined as 0.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) would be NaN; define it as 0
    return -plogp.sum(dim=-1)
def snake_case__(tensor):
    """Log a 2D tensor through the module logger, one line per layer.

    Floating tensors are printed with 5 decimals, integer (long) tensors as
    plain ints; a header row numbers the columns from 1.
    """
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=False ):
    """Sweep the dataloader once and accumulate, per attention head, the
    attention entropy and a gradient-based importance score.

    NOTE(review): this block is machine-mangled and not runnable as-is: every
    parameter is named ``__lowerCamelCase`` (duplicate parameter names are a
    SyntaxError) while the body reads names that are never bound (``model``,
    ``args``, ``head_mask``, ``attn_entropy``, ``outputs`` ...).  The intended
    signature was presumably ``(args, model, eval_dataloader,
    compute_entropy=True, compute_importance=True, head_mask=None,
    actually_pruned=False)`` -- confirm against the original bertology script
    before restoring it.
    """
    lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads
    # Accumulators on the target device: one cell per (layer, head).
    lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
    lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
    if head_mask is None:
        lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device )
    head_mask.requires_grad_(requires_grad=__lowerCamelCase )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        lowerCamelCase__ : Union[str, Any] =None
    lowerCamelCase__ : List[str] =0.0
    lowerCamelCase__ : Union[str, Any] =0.0
    for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs )
        ((lowerCamelCase__) , ) : Any =inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =(
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__lowerCamelCase ):
                lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        lowerCamelCase__ : int =2
        lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
    if not args.dont_normalize_global_importance:
        lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(__lowerCamelCase )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(__lowerCamelCase )
    logger.info('''Head ranked by importance scores''' )
    lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    lowerCamelCase__ : Dict =torch.arange(
        head_importance.numel() , device=args.device )
    lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase )
    print_ad_tensor(__lowerCamelCase )
    return attn_entropy, head_importance, total_loss
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
    """Iteratively zero out ("mask") the least-important attention heads until
    the 1/loss score drops below ``args.masking_threshold`` of the original
    score, then persist the final mask to ``head_mask.npy``.

    NOTE(review): machine-mangled -- duplicate ``__lowerCamelCase`` parameters
    (SyntaxError) and unbound body names (``loss``, ``head_importance``,
    ``new_head_mask``, ``args`` ...).  The intended signature was presumably
    ``(args, model, eval_dataloader)``.
    """
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase )
    lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold )
    lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    lowerCamelCase__ : List[Any] =original_score
    # Keep masking heads in batches of ``num_to_mask`` while the score stays
    # above the acceptance threshold.
    while current_score >= original_score * args.masking_threshold:
        lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        lowerCamelCase__ : int =float('''Inf''' )
        lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1]
        if len(__lowerCamelCase ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 )
        lowerCamelCase__ : Optional[Any] =0.0
        lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase )
        lowerCamelCase__ : Tuple =new_head_mask.clone().detach()
        print_ad_tensor(__lowerCamelCase )
        # Compute metric and head importance again
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance(
            __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase )
        lowerCamelCase__ : Any =1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(__lowerCamelCase )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
    """Physically prune the heads selected by the mask, then compare score,
    parameter count and wall-clock time before vs. after pruning and save the
    pruned model.

    NOTE(review): machine-mangled -- duplicate ``__lowerCamelCase`` parameters
    (SyntaxError) and unbound body names (``loss``, ``head_mask``, ``model``,
    ``args`` ...).  The intended signature was presumably
    ``(args, model, eval_dataloader, head_mask)``.
    """
    lowerCamelCase__ : str =datetime.now()
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance(
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase )
    lowerCamelCase__ : Tuple =1 / loss
    lowerCamelCase__ : Optional[Any] =datetime.now() - before_time
    lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() )
    # Map each layer index to the list of head indices that are zero in the mask.
    lowerCamelCase__ : Any ={
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCamelCase ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            lowerCamelCase__ : Optional[int] =[
                v,
            ]
    assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__lowerCamelCase )
    lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() )
    lowerCamelCase__ : Any =datetime.now()
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , )
    lowerCamelCase__ : str =1 / loss
    lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(__lowerCamelCase , args.output_dir )
def snake_case__ ( ):
    """Entry point: parse CLI args, set up device/distributed state and
    logging, load a GPT-2 LM head model, build the dataloader, compute head
    entropy/importance, and optionally run head masking and pruning.

    NOTE(review): machine-mangled -- assignment targets were rewritten to
    ``lowerCamelCase__`` so later reads of ``parser``, ``args``, ``model``,
    ``data``, ``eval_dataloader`` ... are unbound (NameError at runtime), and
    ``np.intaa`` is presumably a mangled ``np.int64``.  Restore the original
    identifiers before running.
    """
    lowerCamelCase__ : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 )
    parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
    lowerCamelCase__ : List[Any] =parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        lowerCamelCase__ : Dict =0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank )
        lowerCamelCase__ : Any =1
        torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel(
            __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase )
    elif args.n_gpu > 1:
        lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
    torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
    # Prepare dataset
    lowerCamelCase__ : Union[str, Any] =np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),)
    lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase )
    lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase )
    lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        lowerCamelCase__ : Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this file -- the entry point
    # above was renamed to ``snake_case__`` by the same mangling that broke the
    # other functions; running this module as a script raises NameError.
    main()
| 625 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def snake_case__(config_path, display=False):
    """Load an OmegaConf YAML configuration from *config_path*.

    When *display* is truthy the resolved config is also pretty-printed as
    YAML for quick inspection.  Returns the loaded config object.

    Fixes the mangled original, whose two parameters were both named
    ``__lowerCamelCase`` (a SyntaxError) while the body read unbound names.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=None ):
    """Build a taming-transformers ``VQModel`` from a YAML config and load its
    checkpoint onto the given device.

    NOTE(review): machine-mangled -- the three parameters (presumably
    ``device, conf_path=None, ckpt_path=None``) all share the name
    ``__lowerCamelCase`` (a SyntaxError) and the body reads unbound names
    (``conf_path``, ``ckpt_path``, ``sd``, ``model``, ``config``) plus the
    opaque placeholder ``_A``.  The intended arguments of ``load_config``,
    ``torch.load`` and ``load_state_dict`` must be confirmed against the
    original script before restoring.
    """
    if conf_path is None:
        lowerCamelCase__ : str ='''./model_checkpoints/vqgan_only.yaml'''
    lowerCamelCase__ : Dict =load_config(_A , display=_A )
    lowerCamelCase__ : Union[str, Any] =VQModel(**config.model.params )
    if ckpt_path is None:
        lowerCamelCase__ : int ='''./model_checkpoints/vqgan_only.pt'''
    lowerCamelCase__ : str =torch.load(_A , map_location=_A )
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        lowerCamelCase__ : Optional[int] =sd['''state_dict''']
    model.load_state_dict(_A , strict=_A )
    model.to(_A )
    # Free the (potentially large) state dict before returning.
    del sd
    return model
def snake_case__(x, model):
    """Round-trip *x* through a VQGAN: encode to the latent grid, report the
    latent's spatial shape, and decode back to image space.

    Fixes the mangled original, whose parameters were both named
    ``__lowerCamelCase`` (a SyntaxError) while the body read unbound names.
    NOTE(review): the original parameter order (``x, model`` vs ``model, x``)
    is not recoverable from this block alone -- confirm against callers.
    """
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def snake_case__(string, reload=False):
    """Resolve a dotted path like ``package.module.Name`` to the object it names.

    Args:
        string: fully qualified dotted path; the final segment is the
            attribute looked up on the imported module.
        reload: if True, re-import the module first so code changes are
            picked up.

    Fixes the mangled original, whose two parameters were both named
    ``__lowerCamelCase`` (a SyntaxError) while the body read unbound names.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def snake_case__(config):
    """Instantiate the object described by *config*.

    ``config["target"]`` is a dotted import path; ``config["params"]`` (default
    empty) is expanded as keyword arguments to the resolved callable.

    Raises:
        KeyError: if *config* has no ``target`` key.

    Fixes the mangled original, whose parameter was named
    ``__lowerCamelCase`` while the body read an unbound ``config``.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    # NOTE(review): ``get_obj_from_str`` is the dotted-path resolver defined
    # just above in this file (mangled there to ``snake_case__``).
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def snake_case__(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from *config* and optionally load a state dict.

    Args:
        config: config with a ``target``/``params`` layout understood by
            ``instantiate_from_config``.
        sd: state dict to load, or None to keep freshly initialized weights.
        gpu: move the model to CUDA when True.
        eval_mode: switch the model to ``eval()`` when True.

    Returns:
        dict with a single ``"model"`` key (matching the original interface).

    Fixes the mangled original, whose four parameters were all named
    ``__lowerCamelCase`` (a SyntaxError) while the body read unbound names.
    """
    # NOTE(review): ``instantiate_from_config`` is the factory defined earlier
    # in this file (mangled there to ``snake_case__``).
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def snake_case__(config, ckpt, gpu, eval_mode):
    """Load an (optional) Lightning checkpoint and build the model it describes.

    Args:
        config: config whose ``.model`` sub-config describes the model.
        ckpt: checkpoint path; falsy to build the model without weights.
        gpu, eval_mode: forwarded to ``load_model_from_config``.

    Returns:
        ``(model, global_step)`` -- *global_step* is None when no checkpoint
        path is supplied.

    Fixes the mangled original, whose four parameters were all named
    ``__lowerCamelCase`` (a SyntaxError) while the body read unbound names.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    # NOTE(review): ``load_model_from_config`` is the helper defined just
    # above in this file (mangled there to ``snake_case__``).
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 709 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
    """Convert a T5X (``t5x``) checkpoint into a HuggingFace Flax T5/LongT5
    model and save it with ``save_pretrained``.

    NOTE(review): machine-mangled -- the three parameters (presumably
    ``t5x_checkpoint_path, config_name, flax_dump_folder_path``) all share the
    name ``__lowerCamelCase`` (a SyntaxError), every assignment target was
    rewritten to ``lowerCamelCase__``, and the body reads many names that are
    never bound (``tax_model``, ``flax_model``, ``split_mlp_wi``,
    ``layer_name`` ...).  Restore the identifiers before running.
    """
    lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
    lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    # NOTE(review): with ``model_type == "t5"`` this chain still falls through
    # to the ``else`` below and raises -- the second ``if`` was presumably an
    # ``elif`` in the original.
    if config.model_type == "t5":
        lowerCamelCase__ : List[str] ='''SelfAttention'''
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        # NOTE(review): should presumably be ``str(layer_index)``.
        lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
        # Layer Normalization
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
        if split_mlp_wi:
            lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : int =tax_attention_key
        lowerCamelCase__ : Optional[int] =tax_attention_out
        lowerCamelCase__ : List[Any] =tax_attention_query
        lowerCamelCase__ : Optional[Any] =tax_attention_value
        lowerCamelCase__ : List[str] =tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : Optional[int] =tax_global_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
        else:
            lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
        lowerCamelCase__ : str =tax_mlp_wo
        lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
        lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
    # Only for layer 0:
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : str =tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
    # Assigning
    lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    lowerCamelCase__ : List[Any] =tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]
        # Encoder-Decoder-Attention
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
        lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
        lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
        # MLP
        if split_mlp_wi:
            lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : Union[str, Any] =tax_attention_key
        lowerCamelCase__ : str =tax_attention_out
        lowerCamelCase__ : Optional[int] =tax_attention_query
        lowerCamelCase__ : Dict =tax_attention_value
        lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
        lowerCamelCase__ : Any =tax_enc_dec_attention_out
        lowerCamelCase__ : Any =tax_enc_dec_attention_query
        lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
        lowerCamelCase__ : Dict =tax_cross_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Tuple =tax_mlp_wi_a
            lowerCamelCase__ : int =tax_mlp_wi_a
        else:
            lowerCamelCase__ : List[Any] =tax_mlp_wi
        lowerCamelCase__ : Dict =tax_mlp_wo
        # NOTE(review): ``txa_`` here vs ``tax_`` elsewhere -- inconsistent mangling.
        lowerCamelCase__ : Tuple =txa_mlp_layer_norm
        lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
    # Decoder Normalization
    lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    lowerCamelCase__ : int =txa_decoder_norm
    # Only for layer 0:
    lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
    # Token Embeddings
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
    lowerCamelCase__ : Dict =txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(__lowerCamelCase )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    # CLI entry point for the T5X -> Flax conversion.
    # Fixes the mangled original: the parser and parsed args were assigned to
    # ``_lowercase`` while the code below read ``parser``/``args`` (NameError),
    # and the final call read ``args.tax_checkpoint_path`` although the option
    # is declared as ``--t5x_checkpoint_path``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # NOTE(review): the converter above is (mis)named ``snake_case__`` in this
    # mangled file, so call it under that name.
    snake_case__(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 0 |
"""simple docstring"""
def snake_case__(input_str):
    """Return True if every character in *input_str* is unique.

    Uses an arbitrary-precision integer as a bitmap: bit ``ord(ch)`` is set
    the first time a character is seen, so a second occurrence is detected in
    O(1) per character.  The empty string is trivially unique.

    Fixes the mangled original, whose parameter was named
    ``__lowerCamelCase`` while the body read unbound names
    (``input_str``, ``bitmap``, ``ch_unicode``, ``ch_bit_index_on``).
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when executed directly.
    import doctest
    doctest.testmod()
| 710 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    """Config/inputs factory used by the UperNet test suite (the role of a
    ``UperNetModelTester``): builds a small ConvNext backbone config, a
    matching UperNet config, random pixel inputs, and a shape check for
    semantic-segmentation outputs.

    NOTE(review): machine-mangled -- method parameters are all named
    ``lowerCamelCase`` (duplicate parameter names are a SyntaxError) and
    ``__init__`` reads names (``parent``, ``batch_size`` ...) that are never
    bound.  It is also referenced later as ``UperNetModelTester``, which does
    not match this class name.  Restore the original identifiers before use.
    """
    def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : str =image_size
        lowerCamelCase__ : Any =num_channels
        lowerCamelCase__ : Tuple =num_stages
        lowerCamelCase__ : List[str] =hidden_sizes
        lowerCamelCase__ : Any =depths
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Dict =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Any =out_features
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[int] =scope
        lowerCamelCase__ : Optional[int] =num_stages
    # Build (config, pixel_values, labels) with random inputs.
    def snake_case ( self : str )-> Optional[int]:
        lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple =None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : int =self.get_config()
        return config, pixel_values, labels
    # ConvNext backbone config derived from the tester's hyper-parameters.
    def snake_case ( self : Union[str, Any] )-> Any:
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    # Full UperNet config wrapping the backbone config above.
    def snake_case ( self : Union[str, Any] )-> Any:
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )
    # Instantiate the model and assert the segmentation logits' shape.
    def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
        lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
    def snake_case ( self : Any )-> Tuple:
        lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Common model tests for UperNetForSemanticSegmentation.

    NOTE(review): names such as `lowerCamelCase` / `lowerCAmelCase_` look like
    placeholders left by an automated rename; they resolve to nothing in this
    file and should be restored from the upstream test module — TODO confirm.
    '''
    # Model classes / pipeline mapping exercised by the shared mixin tests.
    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    # Feature flags consumed by the common test mixins (all disabled here).
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Set up the model tester and a ConfigTester for UperNet's config.
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Run the standard battery of config serialization/roundtrip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def snake_case ( self : List[str] )-> Dict:
        # Intentionally a no-op: overridden common-properties check.
        return
    def snake_case ( self : Optional[int] )-> List[str]:
        # Verify the first forward() argument is `pixel_values` for every model class.
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )
    def snake_case ( self : Any )-> Union[str, Any]:
        # Delegate the semantic-segmentation forward/shape check to the model tester.
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass
    def snake_case ( self : Optional[int] )-> List[str]:
        # Check hidden_states outputs: count equals num_stages + 1 and the first
        # feature map has spatial size image_size // 4 in both dimensions.
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Any )-> List[Any]:
        # With zero-init configs every trainable parameter mean must round to 0.0 or 1.0.
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        # Smoke test: the first published checkpoint loads successfully.
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
    """Download and return the ADE20k fixture image used by the integration tests.

    Returns:
        A PIL ``Image`` in RGB mode.
    """
    # Bug fix: the original opened the undefined name `__lowerCamelCase` instead of
    # the path returned by hf_hub_download, raising NameError at call time.
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration tests: run real UperNet checkpoints (Swin and ConvNeXt
    backbones) on a fixture image and compare logits against recorded values.

    NOTE(review): `lowerCamelCase` placeholders here should be the device /
    intermediate variables from the upstream test — TODO restore.
    '''
    def snake_case ( self : str )-> Union[str, Any]:
        # Swin-tiny backbone: check output shape (1, num_labels, 512, 512) and a 3x3 logit slice.
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        # Reference values recorded from the original mmsegmentation implementation.
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # ConvNeXt-tiny backbone: same shape and logit-slice checks.
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowercase : int = get_logger(__name__)
_lowercase : List[Any] = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __SCREAMING_SNAKE_CASE :
    """Abstract base class for all logits processors applied during generation."""
    # Bug fix: the two positional parameters were both named `lowerCamelCase`
    # (a SyntaxError) and the decorator referenced the undefined `_A`; the
    # shared input docstring constant is `_lowercase` (defined above).
    @add_start_docstrings(_lowercase )
    def __call__( self : Union[str, Any], input_ids : Union[str, Any], scores : int )-> List[str]:
        """Process `scores`; must be implemented by subclasses."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __SCREAMING_SNAKE_CASE :
    """Abstract base class for all logits warpers used with multinomial sampling."""
    # Bug fix: duplicate `lowerCamelCase` parameters (SyntaxError) and the
    # undefined decorator argument `_A` → use the docstring constant `_lowercase`.
    @add_start_docstrings(_lowercase )
    def __call__( self : str, input_ids : Optional[int], scores : Tuple )-> Union[str, Any]:
        """Warp `scores`; must be implemented by subclasses."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """List of logits processors, applied in order to the scores of each step.

    Processors whose `__call__` takes extra arguments beyond
    `(input_ids, scores, cur_len)` receive them from `kwargs`; missing required
    kwargs raise `ValueError`.
    """
    # Bug fix: the original signature repeated `lowerCamelCase` three times
    # (SyntaxError) and called processors with the undefined placeholder `_A`.
    @add_start_docstrings(_lowercase )
    def __call__( self : str, input_ids : int, scores : Any, cur_len : str, **kwargs : Optional[Any] )-> Tuple:
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                # Processor needs extra kwargs: verify all of them were supplied.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        F'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids, scores, cur_len, **kwargs )
            else:
                scores = processor(input_ids, scores, cur_len )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Logits warper that rescales the distribution by 1 / temperature."""
    def __init__( self : str, temperature : float )-> Optional[Any]:
        # Bug fix: the original validated `isinstance(_A, _A)` (undefined
        # placeholder) and never stored the value on `self`.
        if not isinstance(temperature, float ) or not (temperature > 0):
            raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature
    def __call__( self : Optional[Any], input_ids : List[str], scores : Optional[int], cur_len : Dict )-> Optional[int]:
        # Dividing by the temperature flattens (>1) or sharpens (<1) the distribution.
        scores = scores / self.temperature
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Top-p (nucleus) logits warper: keep the smallest set of tokens whose
    cumulative probability exceeds `top_p`; everything else gets `filter_value`."""
    def __init__( self : List[Any], top_p : float, filter_value : float = -float('''Inf''' ), min_tokens_to_keep : int = 1 )-> Any:
        # Bug fix: duplicate `lowerCamelCase` parameter names (SyntaxError) and
        # `isinstance(_A, _A)` placeholders; attributes were never set on self.
        if not isinstance(top_p, float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep, int ) or (min_tokens_to_keep < 1):
            raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__( self : Union[str, Any], input_ids : Optional[int], scores : Union[str, Any], cur_len : Dict )-> Optional[Any]:
        # Sort all logits descending; lax.top_k over the full vocab acts as a sort.
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1] )
        mask_scores = jnp.full_like(scores, self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores )
        # Undo the sort so filtered scores line up with the original vocab order.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores )[-1]
        return next_scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Top-k logits warper: keep only the `top_k` highest-probability tokens,
    assigning `filter_value` to the rest."""
    def __init__( self : int, top_k : int, filter_value : float = -float('''Inf''' ), min_tokens_to_keep : int = 1 )-> Optional[int]:
        # Bug fix: duplicate parameter names (SyntaxError) and `_A` placeholders.
        if not isinstance(top_k, int ) or top_k <= 0:
            raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
        self.top_k = max(top_k, min_tokens_to_keep )
        self.filter_value = filter_value
    def __call__( self : List[str], input_ids : Tuple, scores : Dict, cur_len : List[str] )-> str:
        batch_size, vocab_size = scores.shape
        # Start from a flat buffer filled with `filter_value`, then scatter the
        # top-k scores back at their original (flattened) positions.
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value )
        topk = min(self.top_k, scores.shape[-1] )  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk )
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None], (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size, vocab_size )
        return next_scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Forces `bos_token_id` to be the first generated token."""
    def __init__( self : List[str], bos_token_id : int )-> Union[str, Any]:
        # Bug fix: the original read the undefined name `bos_token_id` from a
        # parameter called `lowerCamelCase` and never bound `self.bos_token_id`.
        self.bos_token_id = bos_token_id
    def __call__( self : Dict, input_ids : Optional[int], scores : Tuple, cur_len : int )-> Optional[int]:
        new_scores = jnp.full(scores.shape, -float('''inf''' ) )
        # Penalty applies only at the very first step (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0 ), scores )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Forces `eos_token_id` to be the last generated token when `max_length` is reached."""
    def __init__( self : Dict, max_length : int, eos_token_id : int )-> Dict:
        # Bug fix: duplicate parameter names and unbound attributes in the original.
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__( self : Optional[int], input_ids : int, scores : Optional[Any], cur_len : Optional[Any] )-> Optional[int]:
        new_scores = jnp.full(scores.shape, -float('''inf''' ) )
        # Penalty applies only on the step that produces the final token.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0 ), scores )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Suppresses EOS until at least `min_length` tokens have been generated."""
    def __init__( self : Optional[Any], min_length : int, eos_token_id : int )-> Optional[Any]:
        # Bug fix: `isinstance(_A, _A)` placeholders and unbound attributes.
        if not isinstance(min_length, int ) or min_length < 0:
            raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(eos_token_id, int ) or eos_token_id < 0:
            raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__( self : Dict, input_ids : Optional[Any], scores : Any, cur_len : int )-> Tuple:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1 )
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('''inf''' ) ), scores )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Suppresses the given tokens while generation is still at `begin_index`."""
    def __init__( self : List[str], begin_suppress_tokens : List[int], begin_index : int )-> List[Any]:
        # Bug fix: the original listed undefined `_A` and never set attributes.
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index
    def __call__( self : Dict, input_ids : Optional[Any], scores : str, cur_len : Tuple )-> List[Any]:
        # Suppression only applies at exactly cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ), scores )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Unconditionally suppresses a fixed list of token ids at every step."""
    def __init__( self : str, suppress_tokens : List[int] )-> Tuple:
        # Bug fix: `list(_A)` placeholder and unbound attribute in the original.
        self.suppress_tokens = list(suppress_tokens )
    def __call__( self : Tuple, input_ids : Any, scores : Optional[Any], cur_len : Optional[Any] )-> Optional[int]:
        scores = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    """Forces specific token ids at specific generation indices (e.g. Whisper
    task/language tokens), in an XLA-compatible way."""
    def __init__( self : str, force_token_map : Union[dict, list] )-> int:
        # Bug fix: `dict(_A)` placeholders and the nonexistent dtype `jnp.intaa`
        # (should be int32) in the original.
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1), dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
    def __call__( self : int, input_ids : Union[str, Any], scores : int, cur_len : List[str] )-> Any:
        def _force_token(generation_idx ):
            # Replace every logit with -inf except the forced token, which gets 0.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype ) * -float('''inf''' )
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token) )
            return new_scores
        scores = lax.cond(
            # Past the end of the force map: scores pass through unchanged.
            cur_len >= self.force_token_array.shape[0],
            lambda: scores,
            lambda: lax.cond(
                # Only non-negative entries denote a token to force.
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len ),
                lambda: scores, ), )
        return scores
class __SCREAMING_SNAKE_CASE ( snake_case__ ):
    '''Whisper timestamp logits processor: enforces the pairing rules for
    timestamp tokens and, when the total timestamp probability dominates,
    restricts sampling to timestamp tokens only.

    NOTE(review): the `_A` / `lowerCamelCase` placeholders below are
    obfuscation artifacts — the intended arguments must be restored from the
    upstream implementation; the control flow is preserved here verbatim.
    '''
    def __init__( self : List[str], lowerCamelCase : List[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int] )-> int:
        # Cache the special-token ids; `timestamp_begin` is the id right after
        # <|notimestamps|>, and `begin_index` is where sampling rules start.
        lowerCamelCase__ : str =generate_config.eos_token_id
        lowerCamelCase__ : Any =generate_config.no_timestamps_token_id
        lowerCamelCase__ : List[str] =generate_config.no_timestamps_token_id + 1
        lowerCamelCase__ : List[Any] =decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(_A, '''max_initial_timestamp_index''' ):
            lowerCamelCase__ : List[Any] =generate_config.max_initial_timestamp_index
        else:
            lowerCamelCase__ : Tuple =model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            lowerCamelCase__ : Tuple =model_config.vocab_size
    def __call__( self : List[Any], lowerCamelCase : List[Any], lowerCamelCase : List[str], lowerCamelCase : Dict )-> Optional[Any]:
        # suppress <|notimestamps|> which is handled by without_timestamps
        lowerCamelCase__ : Any =scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
        def handle_pairs(lowerCamelCase : Dict, lowerCamelCase : Tuple ):
            # Timestamps come in pairs: after a lone timestamp only text or EOS
            # may follow; after a completed pair another timestamp is required.
            lowerCamelCase__ : Tuple =jnp.where((cur_len - self.begin_index) >= 1, _A, _A )
            lowerCamelCase__ : str =jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, _A, )
            lowerCamelCase__ : List[str] =jnp.where((cur_len - self.begin_index) < 2, _A, _A )
            lowerCamelCase__ : int =jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, _A, _A, )
            return jnp.where(
                _A, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ), scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ), ), _A, )
        lowerCamelCase__ : Tuple =jax.vmap(_A )(_A, _A )
        # At the very first sampling position, cap how late the initial
        # timestamp may be via `max_initial_timestamp_index`.
        lowerCamelCase__ : List[Any] =jnp.where(cur_len == self.begin_index, _A, _A )
        lowerCamelCase__ : str =jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, _A, )
        lowerCamelCase__ : Any =self.timestamp_begin + self.max_initial_timestamp_index
        lowerCamelCase__ : Tuple =jnp.where(
            _A, scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ), _A, )
        # if sum of probability over timestamps is above any other token, sample timestamp
        lowerCamelCase__ : Optional[int] =jax.nn.log_softmax(_A, axis=-1 )
        def handle_cumulative_probs(lowerCamelCase : List[Any], lowerCamelCase : Optional[Any] ):
            lowerCamelCase__ : Any =jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1 )
            lowerCamelCase__ : Optional[Any] =jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ), _A, )
        lowerCamelCase__ : Union[str, Any] =jax.vmap(_A )(_A, _A )
        return scores
| 711 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
    """Placeholder object that raises an informative error when the `onnx`
    backend is not installed; every entry point just calls `requires_backends`."""
    _a = ['onnx']
    # Bug fix: `*lowerCamelCase, **lowerCamelCase` used the same name for the
    # var-positional and var-keyword parameters, which is a SyntaxError.
    def __init__( self : List[str], *args : Union[str, Any], **kwargs : str )-> Optional[int]:
        requires_backends(self, ['''onnx'''] )
    @classmethod
    def snake_case ( cls : List[str], *args : Any, **kwargs : Union[str, Any] )-> Optional[int]:
        requires_backends(cls, ['''onnx'''] )
    @classmethod
    def snake_case ( cls : Union[str, Any], *args : Tuple, **kwargs : Tuple )-> Optional[int]:
        requires_backends(cls, ['''onnx'''] )
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : Any = 1_6
_lowercase : Union[str, Any] = 3_2
def snake_case__ ( accelerator : "Accelerator" , batch_size : int = 16 , model_name : str = "bert-base-cased" ):
    """Build train/eval DataLoaders for GLUE MRPC.

    Bug fix: the original signature declared the same parameter name three
    times (a SyntaxError) and the body referenced unbound names
    (`datasets`, `accelerator`, the `__snake_case` placeholders).

    Args:
        accelerator: `Accelerator` whose distributed type selects the padding strategy.
        batch_size: per-device batch size for both loaders.
        model_name: checkpoint used to instantiate the tokenizer.
    Returns:
        `(train_dataloader, eval_dataloader)`.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def snake_case__ ( config : dict , args ):
    """Train and evaluate a sequence-classification model on GLUE MRPC under
    `accelerate` (optionally with DeepSpeed), tracking the best accuracy.

    Bug fix: the original signature declared two parameters with the same name
    (a SyntaxError) while the body read `config` and `args`; locals were also
    assigned to throwaway obfuscated names. Logic is otherwise unchanged.

    Args:
        config: hyper-parameters (`lr`, `num_epochs`, `seed`, `batch_size`).
        args: parsed CLI namespace (`model_name_or_path`, `output_dir`,
            `performance_lower_bound`, `num_epochs`).
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer: DeepSpeed may own the optimizer via its config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (a dummy one when DeepSpeed provides its own).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''' , '''mrpc''' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        performance_metric[f'''epoch-{epoch}'''] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
            json.dump(performance_metric , f )
def snake_case__ ( ):
    """Parse CLI arguments and launch the training run.

    Bug fix: every `type=`/`default=`/`required=` argument referenced the
    undefined placeholder `__snake_case`; restored to concrete values implied
    by each option's help text and the config built below.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--performance_lower_bound''' , type=float , default=None , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=3 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name `main()`.
    snake_case__()
| 712 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( x : float , y : float , max_step : int ):
    """Return how quickly the point (x, y) escapes under the Mandelbrot
    iteration, normalised to [0, 1]; 1.0 means it did not diverge in
    `max_step` iterations (i.e. the point is treated as inside the set).

    Bug fix: the original declared all three parameters with the same name
    (a SyntaxError) while the body read `x`, `y` and `max_step`.
    """
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case__ ( distance : float ):
    """Black-and-white colouring: black for points inside the set
    (distance == 1), white for everything else.

    Bug fix: the parameter was named `__lowerCamelCase` while the body read
    the undefined name `distance` (NameError).
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def snake_case__ ( distance : float ):
    """Colour-coded RGB: black inside the set (distance == 1), otherwise a hue
    derived from the normalised escape distance via HSV → RGB conversion.

    Bug fix: the parameter was named `__lowerCamelCase` while the body read
    the undefined name `distance` (NameError).
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def snake_case__ ( image_width : int = 800 , image_height : int = 600 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 50 , use_distance_color_coding : bool = True , ):
    """Render the Mandelbrot set as a PIL image.

    Bug fixes: the original declared all seven parameters with the same name
    (a SyntaxError), and computed each pixel colour without ever writing it
    into the image.

    Args:
        image_width / image_height: output image size in pixels.
        figure_center_x / figure_center_y: centre of the view in figure space.
        figure_width: width of the view in figure space (height follows from
            the aspect ratio so pixels stay square).
        max_step: iteration budget per pixel.
        use_distance_color_coding: colour-coded output when True, else B&W.
    Returns:
        A PIL ``Image`` of the rendered set.
    """
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # height of the view follows from the width via the image aspect ratio
    # (hoisted out of the pixel loop — it does not depend on the pixel)
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # colored version, full figure
    _lowercase : Optional[Any] = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    # Bug fix: the original called `img.show()` although the image was bound
    # to `_lowercase`, raising NameError.
    _lowercase.show()
| 625 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowercase : Optional[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase_ ):
    """Deprecated alias kept for backward compatibility — behaves exactly like
    `FlavaImageProcessor` but emits a deprecation warning on construction."""
    # Bug fixes: `*lowerCamelCase, **lowerCamelCase` reused one name for both
    # collectors (a SyntaxError), and the warning category / super() arguments
    # referenced the undefined name `lowerCamelCase_` (deprecations use
    # FutureWarning).
    def __init__( self : Dict, *args : List[str], **kwargs : Dict )-> List[str]:
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs )
| 713 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( model_name : str ):
    """Build a `VideoMAEConfig` matching the given checkpoint name.

    Bug fixes: the body read the undefined name `model_name` although the
    parameter was called `__lowerCamelCase`; the id2label mapping used the
    undefined `__lowerCamelCase` as the dict key (should be `k`); and the
    computed label maps / flags were assigned to throwaway locals instead of
    the config.

    Args:
        model_name: checkpoint name; must contain "small"/"base"/"large"/"huge"
            and, for fine-tuned checkpoints, "kinetics" or "ssv2".
    Returns:
        A populated `VideoMAEConfig`.
    """
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        # Pre-training checkpoints do not use mean pooling.
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = '''huggingface/label-files'''
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = '''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = '''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def snake_case__ ( model_name : str , config ):
    """Fill the encoder/decoder size hyper-parameters on `config` from the
    checkpoint name ("small" / "base" / "large" / "huge").

    Bug fixes: the original declared both parameters with the same name (a
    SyntaxError) and assigned every value to a throwaway local instead of the
    config object, so the function had no effect. "base" keeps the config's
    defaults, as before.

    Raises:
        ValueError: if the name matches none of the four size keywords.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def snake_case__ ( name ):
    """Translate one key of the original VideoMAE checkpoint into its transformers name.

    Bug fixed: the parameter was an unused ``__lowerCamelCase`` while the body read a
    free variable ``name``, and every ``str.replace`` result was assigned to a
    throwaway local instead of back to ``name`` -- so the function always returned its
    input unchanged.  Each substitution now rebinds ``name``; order matters (e.g. the
    ``decoder.blocks`` rename must run before the generic ``blocks`` rename).
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
    """Split the fused qkv weights of an original VideoMAE state dict into separate
    query/key/value entries and rename the remaining keys.

    NOTE(review): machine-garbled and not runnable as written -- both parameters share
    the name ``__lowerCamelCase`` (a SyntaxError), the body reads names that are never
    bound (``orig_state_dict``, ``key_split``, ``config``, ``dim``, ``val``), and the
    left-hand state-dict keys of the split assignments were lost (everything goes to a
    throwaway ``lowerCamelCase__`` local).  Presumably the parameters were
    ``(orig_state_dict, config)``; restore against the upstream conversion script
    before relying on this.
    """
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
        if key.startswith('''encoder.''' ):
            # strip the "encoder." prefix before further matching
            lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            lowerCamelCase__ : Any =key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                # decoder branch: layer index is the third dotted component
                lowerCamelCase__ : Tuple =config.decoder_hidden_size
                lowerCamelCase__ : str =int(key_split[2] )
                lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
                if "weight" in key:
                    # rows 0:dim -> query, dim:2*dim -> key, last dim rows -> value
                    lowerCamelCase__ : List[Any] =val[:dim, :]
                    lowerCamelCase__ : Any =val[dim : dim * 2, :]
                    lowerCamelCase__ : Dict =val[-dim:, :]
            else:
                # encoder branch: layer index is the second dotted component
                lowerCamelCase__ : Optional[Any] =config.hidden_size
                lowerCamelCase__ : Optional[Any] =int(key_split[1] )
                lowerCamelCase__ : str ='''videomae.encoder.layer.'''
                if "weight" in key:
                    lowerCamelCase__ : int =val[:dim, :]
                    lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
                    lowerCamelCase__ : List[Any] =val[-dim:, :]
        else:
            # non-qkv keys presumably went through the rename helper above -- lost here
            lowerCamelCase__ : int =val
    return orig_state_dict
def snake_case__ ( ):
    """Download the sample 'eating spaghetti' clip from the Hub and return its frames
    as a list of numpy arrays."""
    video_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    frames = np.load(video_path)
    return list(frames)
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
    """Convert an original VideoMAE checkpoint (hosted on Google Drive) to the
    transformers format, verify its logits against hard-coded references, and
    optionally save / push the result.

    NOTE(review): machine-garbled and not runnable as written -- all four parameters
    share the name ``__lowerCamelCase`` (a SyntaxError), results are dropped into
    throwaway ``lowerCamelCase__`` locals, and the body reads names that are never
    bound (``model_name``, ``model``, ``files``, ``config``, ``logits``,
    ``expected_shape``, ...).  Presumably the parameters were
    ``(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub)`` -- confirm
    against the upstream conversion script.
    """
    lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
    if "finetuned" in model_name:
        lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
    else:
        lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
    # download original checkpoint, hosted on Google Drive
    lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
    gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
    # original checkpoints wrap the weights under either "model" or "module"
    if "model" in files:
        lowerCamelCase__ : Dict =files['''model''']
    else:
        lowerCamelCase__ : str =files['''module''']
    lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # verify model on basic input
    lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowerCamelCase__ : int =prepare_video()
    lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
    # pre-training checkpoints additionally need a boolean mask of patch positions
    if "finetuned" not in model_name:
        lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
    lowerCamelCase__ : int =model(**__lowerCamelCase )
    lowerCamelCase__ : Dict =outputs.logits
    lowerCamelCase__ : List[str] =[
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    # reference slices below are per model; classification heads -> [1, num_labels],
    # pre-training heads -> [1, 1408, 1536]
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCamelCase__ : int =torch.Size([1, 174] )
        lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
    elif model_name == "videomae-base":
        lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
    elif model_name == "videomae-base-short":
        lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
    elif model_name == "videomae-large":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCamelCase__ : List[str] =torch.Size([1, 400] )
        lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCamelCase__ : str =torch.Size([1, 400] )
        lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
        lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
    elif model_name == "videomae-base-ssv2":
        lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCamelCase__ : str =torch.Size([1, 174] )
        lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCamelCase__ : str =outputs.loss
        assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    # NOTE(review): garbled -- the parser is bound to ``_lowercase`` but the code below
    # reads ``parser``/``args``, and ``convert_videomae_checkpoint`` does not exist
    # under that name in this file (all functions were renamed ``snake_case__``).
    _lowercase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    _lowercase : Union[str, Any] = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 0 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring -> replacement map used to rename original CLAP state-dict keys.
# NOTE(review): the function below looks this up as ``KEYS_TO_MODIFY_MAPPING``, but the
# garbling renamed it to ``_lowercase`` -- and the feature-extractor assignment right
# after rebinds the very same name, shadowing the map.
_lowercase = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
# Downloads from the Hub at import time -- side effect worth noting.
_lowercase = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def snake_case__ ( checkpoint_path, enable_fusion=False ):
    """Instantiate the original (laion) CLAP model from ``checkpoint_path``.

    Bug fixed: both parameters were named ``__lowerCamelCase`` (a SyntaxError) while the
    body referenced an unbound ``A__``; arguments are now threaded through to
    ``create_model``.

    Returns:
        (model, model_cfg) as produced by ``CLAP.create_model``.
    """
    clap_model, model_cfg = create_model(
        '''HTSAT-tiny''',
        '''roberta''',
        checkpoint_path,
        precision='''fp32''',
        device='''cuda:0''' if torch.cuda.is_available() else '''cpu''',
        enable_fusion=enable_fusion,
        fusion_type='''aff_2d''' if enable_fusion else None,
    )
    return clap_model, model_cfg
def snake_case__ ( state_dict ):
    """Rename the keys of an original CLAP ``state_dict`` to transformers conventions
    and split fused qkv tensors into separate query/key/value entries.

    Bugs fixed: the parameter was an unused ``__lowerCamelCase`` while the body read an
    unbound ``state_dict``, all regex arguments were the unbound ``A__``, and the
    writes into the output dict lost their keys.  NOTE(review): the target key names
    (``qkv`` -> ``query``/``key``/``value``) are reconstructed -- confirm against the
    upstream conversion script.
    """
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'''sequential.{sequential_layer}.''', f'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'''_projection.{projecton_layer}.''', f'''_projection.linear{transformers_projection_layer}.''')
        if "audio" and "qkv" in key:
            # NOTE(review): `"audio" and` is a truthy constant, so this branch fires for
            # EVERY qkv key, not only audio-branch ones; kept as-is to preserve the
            # original script's behaviour.
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def snake_case__ ( checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False ):
    """Convert an original CLAP checkpoint into a transformers ``ClapModel`` and save
    both the model and its config to ``pytorch_dump_folder_path``.

    Bugs fixed: all four parameters were named ``__lowerCamelCase`` (a SyntaxError) and
    every intermediate result was dropped into a throwaway local, so no data flowed
    between the steps.  ``config_path`` is accepted for CLI compatibility but unused.
    """
    clap_model, _model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # NOTE(review): the garbled source only shows ``... = enable_fusion``; setting the
    # flag on the audio sub-config matches the upstream script -- confirm.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): garbled -- the parser is bound to ``_lowercase`` but later lines
    # read ``parser``/``args``, and ``convert_clap_checkpoint`` does not exist under
    # that name in this file (all functions were renamed ``snake_case__``).
    _lowercase = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    _lowercase = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 714 |
"""simple docstring"""
# Sentinel values for the three "colors" of the Dutch national flag problem.
# Bug fixed: all four constants were bound to the same name ``_lowercase`` while the
# tuple (and the sort function below) referenced ``red``/``white``/``blue``/``colors``,
# which raised NameError at import time.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def snake_case__ ( sequence, colors=(0, 1, 2) ):
    """Sort ``sequence`` (containing only the three ``colors`` values) in place using
    the Dutch national flag algorithm, and return it.

    Bugs fixed: the pointer initialisations and both swaps were assigned to throwaway
    locals while the loop read unbound ``low``/``mid``/``high``, and ``colors`` was an
    undefined global -- it is now a keyword parameter defaulting to ``(0, 1, 2)``
    (backward-compatible generalisation: any three-value scheme works).

    Raises:
        ValueError: if an element outside ``colors`` is encountered.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # first color: swap down to the low boundary
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            # second color: already in the middle region
            mid += 1
        elif sequence[mid] == colors[2]:
            # third color: swap up to the high boundary (do not advance mid)
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): garbled -- both results are bound to ``_lowercase`` while the next
    # lines read ``user_input``/``unsorted``, and ``dutch_national_flag_sort`` does not
    # exist under that name in this file (the function was renamed ``snake_case__``).
    _lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip()
    _lowercase : int = [int(item.strip()) for item in user_input.split(",")]
    print(f'{dutch_national_flag_sort(unsorted)}')
| 625 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; NOTE(review): the property below reads ``logger``, but garbling bound
# it to ``_lowercase`` -- and the archive map right after rebinds the same name.
_lowercase : str = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
_lowercase : Optional[Any] = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Configuration class for a Transformer-XL style model (``model_type``
    "transfo-xl").

    NOTE(review): machine-garbled and not runnable as written -- every ``__init__``
    keyword shares the name ``lowerCamelCase`` (a SyntaxError, and one default is a
    mutable list), all ``self.<attr>`` assignments were replaced by throwaway
    ``lowerCamelCase__`` locals (so e.g. ``self.cutoffs`` is read before ever being
    set), and ``@max_position_embeddings.setter`` references a property name that no
    longer exists after the methods were renamed ``snake_case``.
    """
    _a = 'transfo-xl'
    _a = ['mems']
    # maps common config attribute names onto Transformer-XL's historical names
    _a = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self : Any, lowerCamelCase : Optional[Any]=26_7735, lowerCamelCase : Dict=[2_0000, 4_0000, 20_0000], lowerCamelCase : Union[str, Any]=1024, lowerCamelCase : Optional[Any]=1024, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : Tuple=64, lowerCamelCase : Optional[int]=4096, lowerCamelCase : List[str]=4, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Tuple=18, lowerCamelCase : Optional[int]=1600, lowerCamelCase : List[str]=1000, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Tuple=True, lowerCamelCase : Dict=0, lowerCamelCase : str=-1, lowerCamelCase : Dict=True, lowerCamelCase : List[Any]=0.1, lowerCamelCase : List[Any]=0.0, lowerCamelCase : List[str]=True, lowerCamelCase : str="normal", lowerCamelCase : Tuple=0.01, lowerCamelCase : int=0.01, lowerCamelCase : Optional[int]=0.02, lowerCamelCase : List[Any]=1E-5, lowerCamelCase : int=0, **lowerCamelCase : List[str], )-> int:
        lowerCamelCase__ : str =vocab_size
        lowerCamelCase__ : int =[]
        # adaptive-softmax cutoffs; presumably was ``self.cutoffs = []`` before garbling
        self.cutoffs.extend(lowerCamelCase )
        if proj_share_all_but_first:
            lowerCamelCase__ : str =[False] + [True] * len(self.cutoffs )
        else:
            lowerCamelCase__ : Optional[Any] =[False] + [False] * len(self.cutoffs )
        lowerCamelCase__ : Union[str, Any] =d_model
        lowerCamelCase__ : int =d_embed
        lowerCamelCase__ : Union[str, Any] =d_head
        lowerCamelCase__ : int =d_inner
        lowerCamelCase__ : str =div_val
        lowerCamelCase__ : Dict =pre_lnorm
        lowerCamelCase__ : Any =n_layer
        lowerCamelCase__ : List[str] =n_head
        lowerCamelCase__ : List[str] =mem_len
        lowerCamelCase__ : Tuple =same_length
        lowerCamelCase__ : Optional[Any] =attn_type
        lowerCamelCase__ : str =clamp_len
        lowerCamelCase__ : Optional[Any] =sample_softmax
        lowerCamelCase__ : str =adaptive
        lowerCamelCase__ : Optional[int] =dropout
        lowerCamelCase__ : str =dropatt
        lowerCamelCase__ : List[str] =untie_r
        lowerCamelCase__ : Any =init
        lowerCamelCase__ : Tuple =init_range
        lowerCamelCase__ : Optional[int] =proj_init_std
        lowerCamelCase__ : Tuple =init_std
        lowerCamelCase__ : Tuple =layer_norm_epsilon
        super().__init__(eos_token_id=lowerCamelCase, **lowerCamelCase )
    @property
    def snake_case ( self : Union[str, Any] )-> Dict:
        # Message copied from Transformer-XL documentation
        # -1 signals "no fixed maximum sequence length" to callers of this property.
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def snake_case ( self : Optional[Any], lowerCamelCase : Optional[Any] )-> Optional[Any]:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 715 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Fast (CPU) unit tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): machine-garbled -- methods were all renamed ``snake_case`` (later
    definitions shadow earlier ones), several signatures repeat the parameter name
    ``lowerCamelCase`` (a SyntaxError), and bodies read names that are never bound
    (``embedder_hidden_size``, ``components``, ``input_image``, ``sd_pipe``, ...).
    Kept byte-identical; restore from the upstream diffusers test before running.
    """
    _a = StableUnCLIPImgaImgPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _a = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _a = frozenset([] )
    def snake_case ( self : List[str] )-> str:
        # builds the dict of tiny dummy pipeline components used by the fast tests
        lowerCamelCase__ : Dict =32
        lowerCamelCase__ : Optional[Any] =embedder_hidden_size
        # image encoding components
        lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =AutoencoderKL()
        lowerCamelCase__ : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
        # builds the kwargs dict fed into the pipeline under test
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        if pil_image:
            # map [-1, 1] tensor to a PIL image
            lowerCamelCase__ : int =input_image * 0.5 + 0.5
            lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
            lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def snake_case ( self : List[str] )-> Optional[Any]:
        # smoke test: run the pipeline on CPU and compare a 3x3 output slice
        lowerCamelCase__ : Dict ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
        inputs.update({'''image_embeds''': None} )
        lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def snake_case ( self : int )-> Tuple:
        lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
    def snake_case ( self : int )-> Optional[Any]:
        lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def snake_case ( self : List[str] )-> List[str]:
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): machine-garbled -- all methods are named ``snake_case`` (later ones
    shadow earlier ones), results are dropped into throwaway locals, and bodies read
    names that are never bound (``pipe``, ``output``, ``image``, ``mem_bytes``, ...).
    Kept byte-identical.
    """
    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Optional[int] )-> int:
        # l-variant checkpoint: compare a full 768x768 output against a reference array
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> Tuple:
        # h-variant checkpoint: same check as above against its own reference array
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> List[str]:
        # memory regression test: peak CUDA allocation must stay under 7 GB
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Nightly GPU tests for ``OnnxStableDiffusionInpaintPipelineLegacy``.

    NOTE(review): machine-garbled -- both properties and the test method are named
    ``snake_case`` (shadowing), session options are dropped into a throwaway local
    while ``options`` is read unbound, and the test body reads ``pipe``/``output``/
    ``expected_image``/``lowercase_`` which are never bound.  Kept byte-identical.
    """
    @property
    def snake_case ( self : Dict )-> Any:
        # ONNX Runtime provider tuple with an explicit GPU memory cap
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def snake_case ( self : int )-> Tuple:
        lowerCamelCase__ : List[str] =ort.SessionOptions()
        # presumably was ``options.enable_mem_pattern = False`` before garbling
        lowerCamelCase__ : List[Any] =False
        return options
    def snake_case ( self : Union[str, Any] )-> Optional[Any]:
        lowerCamelCase__ : Dict =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        lowerCamelCase__ : Union[str, Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lowerCamelCase__ : Tuple =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        lowerCamelCase__ : Any =OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''onnx''', safety_checker=lowercase_, feature_extractor=lowercase_, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowerCamelCase__ : int ='''A red cat sitting on a park bench'''
        lowerCamelCase__ : Any =np.random.RandomState(0 )
        lowerCamelCase__ : Dict =pipe(
            prompt=lowercase_, image=lowercase_, mask_image=lowercase_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=lowercase_, output_type='''np''', )
        lowerCamelCase__ : Union[str, Any] =output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
| 716 |
"""simple docstring"""
def snake_case__ ( n = 4000000 ):
    """Project Euler problem 2: return the sum of the even Fibonacci numbers that do
    not exceed ``n`` (default four million).

    Bugs fixed: the original annotated a tuple-unpacking target (a SyntaxError),
    assigned both Fibonacci variables to the same throwaway name while the loop read
    an unbound ``b``, and appended the function argument instead of the current
    Fibonacci number.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    # The original printed an undefined ``solution()``; call this module's function.
    print(f'{snake_case__() = }')
| 625 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __SCREAMING_SNAKE_CASE ( __lowerCAmelCase ):
    """Output container for the SDE-VE scheduler step.

    NOTE(review): garbling collapsed both dataclass fields onto the name ``_a`` with
    placeholder value 42 (the second shadows the first), and stripped the field type
    annotations -- restore the original field names/annotations before use.
    """
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_a = 1
    @register_to_config
    def __init__( self : Optional[Any], lowerCamelCase : Dict = 2000, lowerCamelCase : List[str] = 0.15, lowerCamelCase : Optional[Any] = 0.01, lowerCamelCase : Dict = 1_348.0, lowerCamelCase : List[str] = 1E-5, lowerCamelCase : Union[str, Any] = 1, )-> str:
        # NOTE(review): garbled -- every keyword is named ``lowerCamelCase`` (a
        # SyntaxError) and the body reads ``sigma_max`` / passes ``lowerCAmelCase_``,
        # neither of which is bound; assignments below also go to throwaway locals
        # instead of ``self``.
        # standard deviation of the initial noise distribution
        lowerCamelCase__ : int =sigma_max
        # setable values
        lowerCamelCase__ : Any =None
        self.set_sigmas(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
def snake_case ( self : Any, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int] = None )-> torch.FloatTensor:
return sample
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : str = None, lowerCamelCase : int = None )-> Optional[Any]:
lowerCamelCase__ : Optional[int] =sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCamelCase__ : List[Any] =torch.linspace(1, lowerCAmelCase_, lowerCAmelCase_, device=lowerCAmelCase_ )
def snake_case ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : str = None, lowerCamelCase : Any = None, lowerCamelCase : List[str] = None )-> int:
lowerCamelCase__ : List[Any] =sigma_min if sigma_min is not None else self.config.sigma_min
lowerCamelCase__ : int =sigma_max if sigma_max is not None else self.config.sigma_max
lowerCamelCase__ : Any =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase_, lowerCAmelCase_ )
lowerCamelCase__ : str =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCamelCase__ : Tuple =torch.exp(torch.linspace(math.log(lowerCAmelCase_ ), math.log(lowerCAmelCase_ ), lowerCAmelCase_ ) )
lowerCamelCase__ : str =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case ( self : str, lowerCamelCase : int, lowerCamelCase : Tuple )-> int:
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def snake_case ( self : Dict, lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Optional[int] = None, lowerCamelCase : List[Any] = True, )-> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCamelCase__ : int =timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCamelCase__ : int =(timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCamelCase__ : Optional[Any] =timesteps.to(self.discrete_sigmas.device )
lowerCamelCase__ : Optional[int] =self.discrete_sigmas[timesteps].to(sample.device )
lowerCamelCase__ : str =self.get_adjacent_sigma(lowerCAmelCase_, lowerCAmelCase_ ).to(sample.device )
lowerCamelCase__ : Any =torch.zeros_like(lowerCAmelCase_ )
lowerCamelCase__ : Tuple =(sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCamelCase__ : List[str] =diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCamelCase__ : Tuple =diffusion.unsqueeze(-1 )
lowerCamelCase__ : List[Any] =drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCamelCase__ : Any =randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCAmelCase_, device=sample.device, dtype=sample.dtype )
lowerCamelCase__ : str =sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCamelCase__ : Union[str, Any] =prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase_, prev_sample_mean=lowerCAmelCase_ )
def snake_case ( self : Tuple, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any], lowerCamelCase : List[str] = None, lowerCamelCase : str = True, )-> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCamelCase__ : Optional[int] =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCamelCase__ : int =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowerCamelCase__ : Optional[Any] =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowerCamelCase__ : List[str] =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCamelCase__ : Any =step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCamelCase__ : Dict =step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCamelCase__ : Optional[int] =step_size.unsqueeze(-1 )
lowerCamelCase__ : str =sample + step_size * model_output
lowerCamelCase__ : Optional[int] =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def snake_case ( self : Any, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], )-> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCamelCase__ : Dict =timesteps.to(original_samples.device )
lowerCamelCase__ : Dict =self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCamelCase__ : List[Any] =(
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase_ ) * sigmas[:, None, None, None]
)
lowerCamelCase__ : int =noise + original_samples
return noisy_samples
def __len__( self : Optional[int] )-> Tuple:
return self.config.num_train_timesteps
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    '''Model tester that builds tiny TF-BlenderbotSmall configs and inputs.

    NOTE(review): machine-mangled — ``__init__`` gives every parameter the
    name ``lowerCamelCase`` (duplicate argument names are a SyntaxError) and
    the bodies read names (``parent``, ``batch_size``, ...) that are never
    bound; comments describe the evident intent only.
    '''
    # config_cls / config_updates / hidden_act, judging by the values
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'
    # __init__: the body suggests the mangled parameters were (parent,
    # batch_size=13, seq_length=7, is_training=True, use_labels=False,
    # vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    # intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
    # max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0).
    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id
    # prepare_config_and_inputs_for_common: random encoder ids ending in EOS,
    # random decoder ids, a tiny config, and the standard inputs dict.
    def snake_case ( self : Any )-> Any:
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict
    # check_decoder_model_past_large_inputs: a forward pass using the
    # past_key_values cache must match the no-cache pass on a random slice.
    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        # NOTE(review): `tf.inta` looks like mangled `tf.int8` — confirm upstream
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def snake_case__(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard input dict for the TF BlenderbotSmall tests.

    Fixes two defects in the generated original: all eight parameters shared
    the name ``__lowerCamelCase`` (a SyntaxError), and the dtype was spelled
    ``tf.inta`` instead of ``tf.int8``. Parameter names are restored from the
    identifiers the body actually reads.

    Any mask not supplied is derived: attention masks mark non-padding token
    positions, head masks default to all-ones per layer/head.

    Returns:
        dict with input_ids, decoder_input_ids and the five mask tensors.
    """
    if attention_mask is None:
        # attend to every non-padding encoder token
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # always attend to the first decoder token (the start token may equal pad)
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Common-model-test suite for TF BlenderbotSmall.

    NOTE(review): machine-mangled — the base classes ``lowerCAmelCase_`` and
    the method-body name ``lowerCamelCase`` are never defined; upstream the
    bases are the TF model/pipeline tester mixins. Comments describe intent only.
    '''
    # all_model_classes / all_generative_model_classes / pipeline_model_mapping
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): these were distinct flags upstream (e.g. is_encoder_decoder,
    # test_pruning, test_onnx); the shared mangled name `_a` means only the last
    # binding survives — confirm against the original source.
    _a = True
    _a = False
    _a = False
    # setUp: build the model tester and the config tester.
    def snake_case ( self : Any )-> str:
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )
    # test_config: run the shared config sanity checks.
    def snake_case ( self : Any )-> Optional[int]:
        self.config_tester.run_common_tests()
    # test_decoder_model_past_large_inputs: delegate to the model tester.
    def snake_case ( self : int )-> str:
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test: generate a reply with facebook/blenderbot_small-90M.

    NOTE(review): machine-mangled — method bodies reference names
    (``lowerCamelCase``, ``model_inputs``, ``generated_ids``) that are never
    bound as written; comments describe the evident intent only.
    '''
    # src_text / model_name, judging by how the methods below use them
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'
    # tokenizer property (cached)
    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    # model property (cached)
    @cached_property
    def snake_case ( self : int )-> List[Any]:
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    # test_90M_generation: tokenize src_text, beam-search generate, and check the
    # decoded reply is one of the known-good strings.
    @slow
    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __SCREAMING_SNAKE_CASE :
    '''Model tester that builds tiny OpenAI-GPT configs, inputs and checks.

    NOTE(review): machine-mangled — every method gives all parameters the name
    ``lowerCamelCase`` (duplicate argument names are a SyntaxError) and bodies
    read names (``parent``, ``batch_size``, ``UpperCAmelCase__``, ...) that are
    never bound; comments describe the evident intent only.
    '''
    # __init__: judging by the body, the mangled parameters were (parent,
    # batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True,
    # use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
    # num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
    # hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
    # max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
    # initializer_range=0.02, num_labels=3, num_choices=4, scope=None).
    def __init__( self : List[Any], lowerCamelCase : str, lowerCamelCase : Tuple=13, lowerCamelCase : List[str]=7, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Optional[int]=True, lowerCamelCase : Tuple=True, lowerCamelCase : List[str]=99, lowerCamelCase : List[Any]=32, lowerCamelCase : Optional[int]=5, lowerCamelCase : Dict=4, lowerCamelCase : Any=37, lowerCamelCase : List[Any]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : Dict=0.1, lowerCamelCase : List[str]=512, lowerCamelCase : Dict=16, lowerCamelCase : Dict=2, lowerCamelCase : int=0.02, lowerCamelCase : Optional[int]=3, lowerCamelCase : List[str]=4, lowerCamelCase : Any=None, )-> str:
        lowerCamelCase__ : Optional[Any] =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : List[str] =seq_length
        lowerCamelCase__ : Dict =is_training
        lowerCamelCase__ : List[Any] =use_token_type_ids
        lowerCamelCase__ : List[str] =use_labels
        lowerCamelCase__ : Union[str, Any] =vocab_size
        lowerCamelCase__ : Optional[int] =hidden_size
        lowerCamelCase__ : Any =num_hidden_layers
        lowerCamelCase__ : Dict =num_attention_heads
        lowerCamelCase__ : Tuple =intermediate_size
        lowerCamelCase__ : Dict =hidden_act
        lowerCamelCase__ : List[str] =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : Dict =max_position_embeddings
        lowerCamelCase__ : List[str] =type_vocab_size
        lowerCamelCase__ : Any =type_sequence_label_size
        lowerCamelCase__ : Dict =initializer_range
        lowerCamelCase__ : List[str] =num_labels
        lowerCamelCase__ : Union[str, Any] =num_choices
        lowerCamelCase__ : List[str] =scope
        # pad token is the last vocab id for this tiny test config
        lowerCamelCase__ : Optional[Any] =self.vocab_size - 1
    # prepare_config_and_inputs: random ids/labels plus a tiny OpenAIGPTConfig
    # and a random head mask.
    def snake_case ( self : Optional[Any] )-> List[Any]:
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : List[Any] =None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        lowerCamelCase__ : Optional[Any] =None
        lowerCamelCase__ : Tuple =None
        lowerCamelCase__ : Optional[Any] =None
        if self.use_labels:
            lowerCamelCase__ : Tuple =ids_tensor([self.batch_size], self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.num_choices )
        lowerCamelCase__ : Tuple =OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        lowerCamelCase__ : List[Any] =ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    # create_and_check_openai_gpt_model: forward passes with/without optional
    # inputs; hidden-state shape must match (batch, seq, hidden).
    def snake_case ( self : Any, lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : List[Any], lowerCamelCase : Any, *lowerCamelCase : str )-> Any:
        lowerCamelCase__ : str =OpenAIGPTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[Any] =model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, head_mask=UpperCAmelCase__ )
        lowerCamelCase__ : int =model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__ )
        lowerCamelCase__ : Optional[Any] =model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    # create_and_check_lm_head_model: LM loss is a scalar, logits are
    # (batch, seq, vocab).
    def snake_case ( self : int, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Dict, *lowerCamelCase : Tuple )-> Optional[Any]:
        lowerCamelCase__ : List[str] =OpenAIGPTLMHeadModel(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowerCamelCase__ : Any =model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    # create_and_check_double_lm_head_model: same shape checks for the
    # double-heads variant.
    def snake_case ( self : List[str], lowerCamelCase : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : List[str], lowerCamelCase : int, *lowerCamelCase : Optional[Any] )-> Union[str, Any]:
        lowerCamelCase__ : str =OpenAIGPTDoubleHeadsModel(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowerCamelCase__ : int =model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    # create_and_check_openai_gpt_for_sequence_classification: logits are
    # (batch, num_labels).
    def snake_case ( self : Optional[Any], lowerCamelCase : Any, lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : Union[str, Any], *lowerCamelCase : Optional[int] )-> Dict:
        lowerCamelCase__ : Dict =self.num_labels
        lowerCamelCase__ : List[str] =OpenAIGPTForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : str =model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    # prepare_config_and_inputs_for_common: repackage the tuple as the common
    # (config, inputs_dict) pair used by the shared tests.
    def snake_case ( self : List[str] )-> str:
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        (
            lowerCamelCase__
        ) : List[Any] =config_and_inputs
        lowerCamelCase__ : Tuple ={
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Common-model-test suite for OpenAI-GPT.

    NOTE(review): machine-mangled — the base classes ``lowerCAmelCase_`` and
    the body name ``UpperCAmelCase__`` are never defined; upstream the bases
    are the model/generation/pipeline tester mixins. Comments describe the
    evident intent only.
    '''
    # all_model_classes / all_generative_model_classes / pipeline_model_mapping
    _a = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    _a = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    _a = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # is_pipeline_test_to_skip: zero-shot pipeline tests cannot run because the
    # tokenizer lacks a padding token.
    def snake_case ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : List[str] )-> Optional[Any]:
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    # _prepare_for_class: add multiple-choice label tensors when the class under
    # test is OpenAIGPTDoubleHeadsModel.
    def snake_case ( self : Dict, lowerCamelCase : List[Any], lowerCamelCase : List[Any], lowerCamelCase : Optional[Any]=False )-> Union[str, Any]:
        lowerCamelCase__ : str =super()._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__, return_labels=UpperCAmelCase__ )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                lowerCamelCase__ : Union[str, Any] =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=UpperCAmelCase__, )
                lowerCamelCase__ : Dict =inputs_dict['''labels''']
                lowerCamelCase__ : Optional[int] =inputs_dict['''labels''']
                lowerCamelCase__ : Dict =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=UpperCAmelCase__, )
                lowerCamelCase__ : Dict =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=UpperCAmelCase__ )
        return inputs_dict
    # setUp: build the model tester and the config tester.
    def snake_case ( self : str )-> Tuple:
        lowerCamelCase__ : Optional[Any] =OpenAIGPTModelTester(self )
        lowerCamelCase__ : int =ConfigTester(self, config_class=UpperCAmelCase__, n_embd=37 )
    # test_config
    def snake_case ( self : Optional[int] )-> List[Any]:
        self.config_tester.run_common_tests()
    # test_openai_gpt_model
    def snake_case ( self : int )-> Tuple:
        lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase__ )
    # test_openai_gpt_lm_head_model
    def snake_case ( self : str )-> Union[str, Any]:
        lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase__ )
    # test_openai_gpt_double_lm_head_model
    def snake_case ( self : Any )-> Dict:
        lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase__ )
    # test_openai_gpt_classification_model
    def snake_case ( self : List[str] )-> Optional[int]:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase__ )
    # test_model_from_pretrained: load the first published checkpoint.
    @slow
    def snake_case ( self : Union[str, Any] )-> List[str]:
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Any =OpenAIGPTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test: greedy generation with the `openai-gpt` checkpoint.

    NOTE(review): machine-mangled — the body reads ``UpperCAmelCase__`` and
    ``output_ids``, names that are never bound as written; the evident intent
    is to generate from the prompt ids and compare with the expected sequence.
    '''
    @slow
    def snake_case ( self : Optional[int] )-> Dict:
        lowerCamelCase__ : List[Any] =OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(UpperCAmelCase__ )
        lowerCamelCase__ : Optional[Any] =torch.tensor([[481, 4735, 544]], dtype=torch.long, device=UpperCAmelCase__ ) # the president is
        # expected greedy continuation of the prompt
        lowerCamelCase__ : str =[
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            4_0477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ] # the president is a very good man. " \n " i\'m sure he is, " said the
        lowerCamelCase__ : Tuple =model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__ )
        self.assertListEqual(output_ids[0].tolist(), UpperCAmelCase__ )
| 718 |
"""simple docstring"""
def snake_case__(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether ``next_ver`` can extend the partial Hamiltonian path.

    The generated original gave all four parameters the same name
    ``__lowerCamelCase`` (a SyntaxError); the names are restored from the
    identifiers the body actually reads.

    Args:
        graph: adjacency matrix (1 = edge present, 0 = absent).
        next_ver: candidate vertex to place at position ``curr_ind``.
        curr_ind: index in ``path`` currently being filled.
        path: partial cycle; unfilled slots hold -1.

    Returns:
        True iff an edge connects the previously placed vertex to ``next_ver``
        and ``next_ver`` does not already occur in ``path``.
    """
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that the next vertex is not already in the path
    return not any(vertex == next_ver for vertex in path)
def snake_case__(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking step: try to complete ``path`` into a Hamiltonian cycle.

    The generated original was syntactically invalid (all parameters named
    ``__lowerCamelCase``) and recursed through module-level names
    (``valid_connection``, ``util_hamilton_cycle``) that are never defined
    under those names in this file, so the search is implemented
    self-containedly with nested helpers. ``path`` is mutated in place:
    slot 0 and the last slot hold the start vertex, unfilled slots hold -1.

    Args:
        graph: adjacency matrix (1 = edge present, 0 = absent).
        path: partial cycle of length ``len(graph) + 1``.
        curr_ind: first index of ``path`` still to be filled.

    Returns:
        True if ``path`` was completed into a Hamiltonian cycle, else False
        (with ``path`` restored at the failed positions).
    """

    def _valid_connection(next_ver: int, ind: int) -> bool:
        # an edge must exist from the previous vertex and the vertex must be unused
        if graph[path[ind - 1]][next_ver] == 0:
            return False
        return not any(vertex == next_ver for vertex in path)

    def _search(ind: int) -> bool:
        # Base case: every vertex placed — close the cycle back to the start
        if ind == len(graph):
            return graph[path[ind - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            if _valid_connection(next_ver, ind):
                path[ind] = next_ver  # tentatively extend the path
                if _search(ind + 1):
                    return True
                path[ind] = -1  # backtrack
        return False

    return _search(curr_ind)
def snake_case__(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Find a Hamiltonian cycle in ``graph`` starting (and ending) at ``start_index``.

    The generated original had duplicate parameter names (a SyntaxError) and
    delegated to ``util_hamilton_cycle``, a name never defined in this file,
    so the backtracking search is implemented self-containedly here.

    Args:
        graph: adjacency matrix (1 = edge present, 0 = absent).
        start_index: vertex at which the cycle starts and ends (default 0).

    Returns:
        The cycle as a list of vertices with the start vertex repeated at the
        end, or an empty list when no Hamiltonian cycle exists.
    """
    num_vertices = len(graph)
    # path[0] and path[-1] are pinned to the start vertex; -1 marks empty slots
    path = [-1] * (num_vertices + 1)
    path[0] = path[-1] = start_index

    def _search(ind: int) -> bool:
        # Base case: all vertices placed — require an edge closing the cycle
        if ind == num_vertices:
            return graph[path[ind - 1]][path[0]] == 1
        for candidate in range(num_vertices):
            # candidate must be adjacent to the previous vertex and unused
            if graph[path[ind - 1]][candidate] == 1 and candidate not in path:
                path[ind] = candidate
                if _search(ind + 1):
                    return True
                path[ind] = -1  # backtrack
        return False

    return path if _search(1) else []
| 625 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''Model-common tests for FlaxAutoencoderKL with a tiny two-block VAE config.

    NOTE(review): machine-mangled — the base class ``SCREAMING_SNAKE_CASE__``
    and the body names (``snake_case__``, ``batch_size``, ``prng_key``, ...)
    are never bound as written; upstream this extends FlaxModelTesterMixin.
    '''
    # model_class under test
    _a = FlaxAutoencoderKL
    # dummy_input: a random (4, 3, 32, 32) sample batch plus the PRNG key.
    @property
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ : int =4
        lowerCamelCase__ : str =3
        lowerCamelCase__ : Dict =(32, 32)
        lowerCamelCase__ : Dict =jax.random.PRNGKey(0 )
        lowerCamelCase__ : Union[str, Any] =jax.random.uniform(snake_case__, ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    # prepare_init_args_and_inputs_for_common: tiny VAE init kwargs + dummy input.
    def snake_case ( self : int )-> Optional[Any]:
        lowerCamelCase__ : Any ={
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        lowerCamelCase__ : Tuple =self.dummy_input
        return init_dict, inputs_dict
| 719 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = MBartTokenizer
_a = MBartTokenizerFast
_a = True
_a = True
def snake_case ( self : Tuple )-> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Dict )-> Union[str, Any]:
lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
    def snake_case ( self : Tuple )-> List[Any]:
        """Check that fast (rust) and slow (python) tokenizers save/load interchangeably.

        Covers three save modes: default, legacy_format=True, legacy_format=False.
        NOTE(review): ``lowerCamelCase``, ``tokenizer_r``, ``tokenizer_p``,
        ``tokenizer_r_files`` and ``tokenizer_pp`` are undefined names in this scope
        (automated-renaming damage); compare with the upstream common tokenizer test.
        """
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Integration tests for the pretrained facebook/mbart-large-en-ro tokenizer.

    NOTE(review): automated renaming rebound every class attribute to ``_a`` (only
    the last survives) and left undefined names (``lowerCamelCase``, ``generated_ids``,
    ``src_text``, ``ids``, ``new_tok``, ``batch``, ``targets``, ``cls.checkpoint_name``
    etc.) in the method bodies; compare with the upstream MBartEnroIntegrationTest.
    '''
    # Upstream names (in order): checkpoint_name, src_text, tgt_text, expected_src_tokens.
    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        """Load the en_XX -> ro_RO tokenizer once for the whole test class."""
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        # Upstream: cls.pad_token_id = 1
        lowerCamelCase__ : Optional[int] =1
        return cls
    def snake_case ( self : Optional[Any] )-> List[str]:
        """Language-code tokens map to their fixed fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
    def snake_case ( self : Optional[int] )-> List[Any]:
        """Encoding the source text reproduces the expected token ids."""
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> str:
        """Decoding with skip_special_tokens strips the language code and EOS."""
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
    def snake_case ( self : Tuple )-> int:
        """Truncation keeps EOS + language code as the final two ids."""
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
    def snake_case ( self : int )-> Any:
        """<mask> and ar_AR resolve to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
    def snake_case ( self : Tuple )-> Optional[Any]:
        """fairseq token<->id mapping survives a save/load round-trip."""
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        """Batch layout matches the original fairseq reference batch."""
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        """Padded/truncated tensors have the expected shapes and suffix tokens."""
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    def snake_case ( self : List[Any] )-> Dict:
        """Source and target can be truncated to different max lengths."""
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        """_build_translation_inputs appends EOS + src lang and forces the tgt lang BOS."""
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowercase : List[Any] = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    '''Config/inputs factory for the ConvNeXt-V2 model tests.

    NOTE(review): automated renaming left every parameter of ``__init__`` and of
    the check methods named ``lowerCamelCase`` (duplicate argument names are a
    SyntaxError) and left undefined names such as ``snake_case_``, ``parent``,
    ``config``, ``model``, ``result`` and ``config_and_inputs`` in the bodies;
    compare with the upstream ConvNextV2ModelTester before relying on this code.
    '''
    def __init__( self : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : Any=13, lowerCamelCase : int=32, lowerCamelCase : Optional[Any]=3, lowerCamelCase : Any=4, lowerCamelCase : Optional[int]=[10, 20, 30, 40], lowerCamelCase : str=[2, 2, 3, 2], lowerCamelCase : Dict=True, lowerCamelCase : str=True, lowerCamelCase : Optional[int]=37, lowerCamelCase : str="gelu", lowerCamelCase : Optional[Any]=10, lowerCamelCase : List[Any]=0.02, lowerCamelCase : Dict=["stage2", "stage3", "stage4"], lowerCamelCase : List[Any]=[2, 3, 4], lowerCamelCase : int=None, )-> Optional[int]:
        lowerCamelCase__ : Dict =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : Optional[int] =image_size
        lowerCamelCase__ : Dict =num_channels
        lowerCamelCase__ : List[Any] =num_stages
        lowerCamelCase__ : str =hidden_sizes
        lowerCamelCase__ : Union[str, Any] =depths
        lowerCamelCase__ : int =is_training
        lowerCamelCase__ : Optional[Any] =use_labels
        lowerCamelCase__ : Tuple =intermediate_size
        lowerCamelCase__ : Tuple =hidden_act
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[Any] =initializer_range
        lowerCamelCase__ : int =out_features
        lowerCamelCase__ : Union[str, Any] =out_indices
        lowerCamelCase__ : Union[str, Any] =scope
    def snake_case ( self : Dict )-> List[str]:
        """Build random pixel_values (and labels when use_labels) plus a config."""
        lowerCamelCase__ : Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] =None
        if self.use_labels:
            lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size], self.num_labels )
        lowerCamelCase__ : Optional[Any] =self.get_config()
        return config, pixel_values, labels
    def snake_case ( self : List[str] )-> Union[str, Any]:
        """Return a ConvNextVaConfig built from the tester's hyperparameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=snake_case_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : str )-> Any:
        """Forward the base model and check the last hidden state shape."""
        lowerCamelCase__ : Union[str, Any] =ConvNextVaModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowerCamelCase__ : Optional[Any] =model(snake_case_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def snake_case ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Any )-> str:
        """Forward the classification head and check the logits shape."""
        lowerCamelCase__ : Optional[int] =ConvNextVaForImageClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowerCamelCase__ : str =model(snake_case_, labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def snake_case ( self : str, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : Optional[Any] )-> str:
        """Check backbone feature maps/channels with and without out_features."""
        lowerCamelCase__ : Tuple =ConvNextVaBackbone(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowerCamelCase__ : Optional[Any] =model(snake_case_ )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowerCamelCase__ : Any =None
        lowerCamelCase__ : str =ConvNextVaBackbone(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowerCamelCase__ : str =model(snake_case_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ), 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ), 1 )
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
    def snake_case ( self : int )-> List[str]:
        """Return (config, inputs_dict) without labels for the common tests."""
        lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
        lowerCamelCase__ : str =config_and_inputs
        lowerCamelCase__ : Dict ={"pixel_values": pixel_values}
        return config, inputs_dict
    def snake_case ( self : Tuple )-> Union[str, Any]:
        """Return (config, inputs_dict) including labels for training tests."""
        lowerCamelCase__ : List[str] =self.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] =config_and_inputs
        lowerCamelCase__ : Union[str, Any] ={"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _snake_case , _snake_case , unittest.TestCase ):
    '''Common model/pipeline test-suite wiring for ConvNeXt-V2.

    NOTE(review): the base-class list repeats ``_snake_case`` (duplicate base
    class -> TypeError at class creation), every class attribute is rebound to
    ``_a`` (only the last survives), and the bodies use undefined names such as
    ``snake_case_``, ``model`` and ``inputs_dict`` — automated-renaming damage;
    compare with the upstream ConvNextV2ModelTest.
    '''
    # Upstream names (in order): all_model_classes, pipeline_model_mapping,
    # fx_compatible, test_pruning, test_resize_embeddings, test_head_masking,
    # has_attentions.
    _a = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    _a = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    def snake_case ( self : Optional[int] )-> Tuple:
        """Create the model tester and config tester fixtures."""
        lowerCamelCase__ : Any =ConvNextVaModelTester(self )
        lowerCamelCase__ : List[str] =ConfigTester(self, config_class=snake_case_, has_text_modality=snake_case_, hidden_size=37 )
    def snake_case ( self : Tuple )-> Tuple:
        """Run the standard configuration round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def snake_case ( self : int )-> Any:
        # Intentionally a no-op: ConvNextV2 has no extra common properties to test.
        return
    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def snake_case ( self : Optional[int] )-> List[str]:
        pass
    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def snake_case ( self : Optional[int] )-> Dict:
        pass
    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def snake_case ( self : List[str] )-> int:
        pass
    def snake_case ( self : Any )-> str:
        """Train one step per model class and check the loss backpropagates."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCamelCase__ : Tuple =True
            # Skip mapping-only classes (e.g. backbone-style heads without a loss).
            if model_class.__name__ in [
                *get_values(snake_case_ ),
                *get_values(snake_case_ ),
            ]:
                continue
            lowerCamelCase__ : str =model_class(snake_case_ )
            model.to(snake_case_ )
            model.train()
            lowerCamelCase__ : List[Any] =self._prepare_for_class(snake_case_, snake_case_, return_labels=snake_case_ )
            lowerCamelCase__ : List[str] =model(**snake_case_ ).loss
            loss.backward()
    def snake_case ( self : str )-> Optional[Any]:
        """Same as above but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCamelCase__ : List[str] =False
            lowerCamelCase__ : Optional[int] =True
            if (
                model_class.__name__
                in [*get_values(snake_case_ ), *get_values(snake_case_ )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            lowerCamelCase__ : Optional[int] =model_class(snake_case_ )
            model.to(snake_case_ )
            model.gradient_checkpointing_enable()
            model.train()
            lowerCamelCase__ : Optional[int] =self._prepare_for_class(snake_case_, snake_case_, return_labels=snake_case_ )
            lowerCamelCase__ : List[Any] =model(**snake_case_ ).loss
            loss.backward()
    def snake_case ( self : Tuple )-> Tuple:
        """The forward signature's first argument must be pixel_values."""
        lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Dict =model_class(snake_case_ )
            lowerCamelCase__ : Dict =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : List[Any] =[*signature.parameters.keys()]
            lowerCamelCase__ : Optional[Any] =["pixel_values"]
            self.assertListEqual(arg_names[:1], snake_case_ )
    def snake_case ( self : Any )-> Tuple:
        """Exercise the base-model forward pass via the model tester."""
        lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )
    def snake_case ( self : Tuple )-> Tuple:
        """Check hidden-state count and spatial shape, by kwarg and by config."""
        def check_hidden_states_output(lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : int ):
            lowerCamelCase__ : Optional[Any] =model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple =model(**self._prepare_for_class(snake_case_, snake_case_ ) )
            lowerCamelCase__ : Tuple =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : Union[str, Any] =self.model_tester.num_stages
            self.assertEqual(len(snake_case_ ), expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(snake_case_, snake_case_, snake_case_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(snake_case_, snake_case_, snake_case_ )
    def snake_case ( self : Dict )-> Tuple:
        """Exercise the image-classification head via the model tester."""
        lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )
    @slow
    def snake_case ( self : Tuple )-> Any:
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Dict =ConvNextVaModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
def snake_case__ ( ):
    """Load the COCO "two cats" fixture image used by the integration tests.

    Returns:
        PIL.Image.Image: the image loaded from the fixed fixture path
        (relative to the repository root).
    """
    # Fix: the image was previously bound to a local variable while the
    # undefined name ``image`` was returned (NameError); return it directly.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test: ConvNeXt-V2 classification logits on the COCO cats image.

    NOTE(review): ``snake_case_``, ``model``, ``preprocessor``, ``outputs`` and
    ``prepare_img`` are undefined names here (automated-renaming damage; the
    image helper in this module is named ``snake_case__``).
    '''
    @cached_property
    def snake_case ( self : List[str] )-> List[str]:
        """Image processor for the tiny ImageNet-1k checkpoint (None without vision deps)."""
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
    @slow
    def snake_case ( self : Tuple )-> Dict:
        """Run a forward pass and compare the first three logits to reference values."""
        lowerCamelCase__ : List[Any] =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(snake_case_ )
        lowerCamelCase__ : Any =self.default_image_processor
        lowerCamelCase__ : List[str] =prepare_img()
        lowerCamelCase__ : Tuple =preprocessor(images=snake_case_, return_tensors='''pt''' ).to(snake_case_ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Union[str, Any] =model(**snake_case_ )
        # verify the logits
        lowerCamelCase__ : str =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, snake_case_ )
        lowerCamelCase__ : Any =torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], snake_case_, atol=1E-4 ) )
| 721 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase )
lowerCamelCase__ : str =range(1 , __lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
| 625 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_lowercase = HUGGINGFACE_HUB_CACHE
_lowercase = "config.json"
_lowercase = "diffusion_pytorch_model.bin"
_lowercase = "diffusion_flax_model.msgpack"
_lowercase = "model.onnx"
_lowercase = "diffusion_pytorch_model.safetensors"
_lowercase = "weights.pb"
_lowercase = "https://huggingface.co"
_lowercase = default_cache_path
_lowercase = "diffusers_modules"
_lowercase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
_lowercase = ["fp16", "non-ema"]
_lowercase = ".self_attn"
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case__ ( __lowerCamelCase : List[Any] ):
    """Normalize a size argument to a pair.

    Iterables (lists, tuples, strings, ...) are returned unchanged, while a
    scalar ``v`` is expanded to the 2-tuple ``(v, v)`` — the usual
    (height, width) convention for square image/patch sizes.
    """
    # Fix: the original returned the undefined name ``x`` instead of the
    # parameter (NameError on every call).
    if isinstance(__lowerCamelCase , collections.abc.Iterable ):
        return __lowerCamelCase
    return (__lowerCamelCase, __lowerCamelCase)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    # NOTE(review): automated renaming gave these methods several parameters all
    # named ``lowerCamelCase`` (duplicate argument names are a SyntaxError) and
    # left ``a``, ``b``, ``diff`` and ``tol`` undefined in the last body; upstream
    # these are get_vision_text_model / prepare_config_and_inputs hooks plus
    # assert_almost_equals(a, b, tol).
    def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
        # Abstract hook: subclasses return (vision_model, text_model).
        pass
    def snake_case ( self : List[str] )-> List[str]:
        # Abstract hook: subclasses build config + model inputs.
        pass
    def snake_case ( self : Optional[Any] )-> str:
        # Abstract hook: subclasses may add extra setup.
        pass
    def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
        """Assert the max absolute elementwise difference is within tolerance."""
        lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
        self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
    # NOTE(review): duplicate ``lowerCamelCase`` parameters (SyntaxError) and
    # undefined names (``vision_model``, ``text_model``, ``model``, ``output``,
    # ``after_output``, ``out_a``, ...) from automated renaming; compare with the
    # upstream FlaxVisionTextDualEncoder mixin.
    def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
        """Build the dual encoder from sub-configs and check embedding shapes."""
        lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        """Build the dual encoder from pretrained sub-models and check shapes."""
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        """Outputs must match (within 1e-3) after a save/load round-trip."""
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
            lowerCamelCase__ : List[str] =after_output[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-3 )
    # NOTE(review): duplicate ``lowerCamelCase`` parameters (SyntaxError) and many
    # undefined names from automated renaming; upstream these are
    # check_vision_text_output_attention and check_pt_flax_equivalence.
    def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
        """Check attention tensor counts/shapes for both vision and text towers."""
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[str] =model(
            input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
        lowerCamelCase__ : int =output.vision_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
        lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ : int =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
        lowerCamelCase__ : List[Any] =output.text_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
        """Cross-check PT and Flax outputs directly and after save/convert in both directions."""
        pt_model.to(lowerCamelCase )
        pt_model.eval()
        # prepare inputs
        lowerCamelCase__ : Any =inputs_dict
        lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
            lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
            lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
            pt_model_loaded.to(lowerCamelCase )
            pt_model_loaded.eval()
            with torch.no_grad():
                lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
            self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
            for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
                self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def check_equivalence_flax_to_pt( self, vision_config, text_config, inputs_dict ):
    """Check Flax -> PT weight conversion: outputs of both frameworks must match.

    NOTE(review): the mangled version had duplicate parameter names (SyntaxError);
    the cross test below calls this method as ``check_equivalence_flax_to_pt``.
    """
    config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
    pt_model = VisionTextDualEncoderModel(config)
    fx_model = FlaxVisionTextDualEncoderModel(config)
    # Load the Flax parameters back into the PyTorch model, then compare.
    pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
    self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
def test_model_from_pretrained_configs( self ):
    """Build a model from the two sub-configs and run the shared check.

    NOTE(review): renamed from a duplicated placeholder name; a ``test_`` prefix
    is required for unittest discovery — confirm against the original file.
    """
    inputs_dict = self.prepare_config_and_inputs()
    self.check_model_from_pretrained_configs(**inputs_dict)
def test_vision_text_dual_encoder_from_pretrained( self ):
    """Run the from-pretrained construction check on freshly prepared inputs.

    NOTE(review): renamed from a duplicated placeholder name; ``test_`` prefix
    needed for unittest discovery — confirm against the original file.
    """
    inputs_dict = self.prepare_config_and_inputs()
    self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
def test_save_load( self ):
    """Run the save/reload round-trip check on freshly prepared inputs.

    NOTE(review): renamed from a duplicated placeholder name; ``test_`` prefix
    needed for unittest discovery — confirm against the original file.
    """
    inputs_dict = self.prepare_config_and_inputs()
    self.check_save_load(**inputs_dict)
def test_vision_text_output_attention( self ):
    """Run the attention-output check on freshly prepared inputs.

    NOTE(review): renamed from a duplicated placeholder name; ``test_`` prefix
    needed for unittest discovery — confirm against the original file.
    """
    inputs_dict = self.prepare_config_and_inputs()
    self.check_vision_text_output_attention(**inputs_dict)
@is_pt_flax_cross_test
def test_pt_flax_equivalence( self ):
    """Cross-framework test: PT<->Flax weight conversion must preserve outputs.

    NOTE(review): the mangled version read several undefined names; restored the
    locals that the subsequent statements actually consume.
    """
    config_inputs_dict = self.prepare_config_and_inputs()
    vision_config = config_inputs_dict.pop("vision_config")
    text_config = config_inputs_dict.pop("text_config")
    # Whatever remains after popping the configs are the model inputs.
    inputs_dict = config_inputs_dict
    self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
    self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
@slow
def test_real_model_save_load_from_pretrained( self ):
    """Save a real pretrained model, reload it, and check outputs stay identical.

    NOTE(review): the mangled version reused one placeholder name for both the
    original and reloaded model and computed ``abs(out - out)`` (always 0),
    which made the final assertion vacuous.
    """
    model, inputs = self.get_pretrained_model_and_inputs()
    outputs = model(**inputs)
    out_before = outputs[0]
    with tempfile.TemporaryDirectory() as tmp_dirname:
        model.save_pretrained(tmp_dirname)
        model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
        after_outputs = model_loaded(**inputs)
        out_after = after_outputs[0]
        # Round-tripping through disk must not change the forward pass.
        max_diff = np.amax(np.abs(out_after - out_before))
        self.assertLessEqual(max_diff, 1e-5)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """ViT + BERT instantiation of the shared dual-encoder test mixin.

    NOTE(review): all three methods had been renamed to the same identifier and
    thus shadowed each other; restored the names the mixin calls
    (``prepare_config_and_inputs``, ``get_pretrained_model_and_inputs``).
    """

    def get_pretrained_model_and_inputs( self ):
        # Both tiny checkpoints are PyTorch checkpoints, hence the from_pt flags
        # (the mangled version passed an undefined name here).
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model( self, vision_config, text_config ):
        # Instantiate the two towers from their configs.
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs( self ):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """CLIP-vision + BERT instantiation of the shared dual-encoder test mixin.

    NOTE(review): all three methods had been renamed to the same identifier and
    thus shadowed each other; restored the names the mixin calls.
    """

    def get_pretrained_model_and_inputs( self ):
        # PyTorch-only tiny checkpoints, hence the from_pt flags (the mangled
        # version passed an undefined name here).
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model( self, vision_config, text_config ):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs( self ):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint."""

    @slow
    def test_inference( self ):
        # NOTE(review): method renamed from a placeholder; unittest needs a
        # ``test_`` prefix — confirm against the original file.
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits: image-to-text and text-to-image similarity matrices
        # must be transposes of each other in shape.
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 625 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# NOTE(review): both constants had been mangled to the same placeholder name;
# the cached loader functions below read MODEL_TYPE and LOAD_DENSE_INDEX, so
# those are the names that must exist at module level.
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question-retrieval encoder and the answer seq2seq model.

    Cached by Streamlit so the heavy checkpoints load once per session.
    Returns: (qar_tokenizer, qar_model, sas_tokenizer, sas_model); the retrieval
    pair is (None, None) when the dense index is disabled.

    NOTE(review): renamed from a placeholder — the module-level call below reads
    ``load_models``; the decorator argument was an undefined name and
    allow_output_mutation is a boolean flag.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages plus their dense FAISS index, and an ES client.

    Returns: (wikiaab_passages, wikiaab_gpu_index_flat, es_client); the first two
    are None when LOAD_DENSE_INDEX is off.

    NOTE(review): renamed from a placeholder — the module-level call below reads
    ``load_indexes``; locals restored to the names the later statements consume.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        # Memory-map the precomputed 128-d passage embeddings (one row per passage).
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a dense index over its question embeddings.

    Returns: (elia_train, eli5_train_q_index).

    NOTE(review): renamed from a placeholder — the module-level call below reads
    ``load_train_data``.
    """
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    # Memory-map the precomputed 128-d question embeddings, one row per example.
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
# Materialize the cached resources once per Streamlit session. The unpack
# targets had all been mangled to one placeholder name; restored the globals
# that the helper functions below actually read (wikiaab_passages,
# wikiaab_gpu_index_flat, es_client, qar_*, sas_*, elia_train,
# eli5_train_q_index).
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training( question, n_results=10 ):
    """Return the `n_results` ELI5 training examples closest to `question`.

    NOTE(review): renamed from a placeholder — the UI script below calls
    ``find_nearest_training``; the mangled version also declared two parameters
    with the same name (a SyntaxError).
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    # FAISS returns (distances D, indices I); only the indices are needed here.
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def make_support( question, source="wiki40b", method="dense", n_results=10 ):
    """Retrieve supporting passages for `question` and build the seq2seq input.

    Returns (question_doc, support_list) where support_list holds
    (article_title, section_title, score, passage_text) tuples.

    NOTE(review): renamed from a placeholder — the UI script below calls
    ``make_support``; duplicate parameter names and undefined argument
    placeholders restored from the surrounding reads.
    """
    if source == "none":
        # No retrieval: feed an empty 10-passage context.
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        # Tensors and tokenizers cannot be hashed by Streamlit; skip them.
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one long-form answer for the prepared `question_doc`.

    NOTE(review): renamed from a placeholder — the UI script below calls
    ``answer_question``; duplicate parameter names restored from the keyword
    arguments the generation call forwards. ``support_list`` in the return is a
    module global set by the UI script — confirm that coupling is intended.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
# NOTE(review): this script was machine-mangled — every assignment target had
# been renamed to one placeholder while reads kept the original names. The
# assignment targets below are restored to the names the reads demand; all
# user-facing strings are preserved byte-for-byte.
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    # Defaults: "Show me everything" with full passage text.
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults, overridden by the sidebar widgets below.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, deduplicating while keeping order.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                # Link each " & "-separated section title to its anchor.
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        # Keep the top answer plus any answer scoring above 2.
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 701 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack.

    Return the maximum total value obtainable from items ``index`` onward
    without the chosen weights exceeding ``max_weight``.

    NOTE(review): the mangled version declared five parameters with the same
    name (a SyntaxError) while the body read ``weights``/``values``/
    ``number_of_items``/``max_weight``/``index`` and recursed via ``knapsack``;
    restored exactly those names.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans_without_item = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans_with_item = 0
    # Option 2: take the current item if it still fits the remaining capacity.
    if weights[index] <= max_weight:
        ans_with_item = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_without_item, ans_with_item)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    # Serving extras are optional: install stubs so the module still imports.
    # NOTE(review): the flag, the stub and the logger had been mangled to
    # placeholder names while ServeCommand below reads
    # ``_serve_dependencies_installed``, calls ``Body`` in its endpoint
    # signatures and logs through ``logger``.
    BaseModel = object

    def Body(*args, **kwargs):
        # No-op stand-in for fastapi.Body so the signatures below still evaluate.
        pass

    _serve_dependencies_installed = False

# Module logger, used by ServeCommand.__init__ below.
logger = logging.get_logger("transformers-cli/serving")
def snake_case__ ( args: Namespace ):
    """Factory for the 'serve' CLI subcommand.

    Builds the pipeline described by the parsed CLI args and wraps it in the
    serve command.

    NOTE(review): the mangled version returned an undefined name instead of the
    pipeline it had just built; ``ServeCommand`` refers to the command class
    defined below (its name was mangled in this file) — confirm the reference
    resolves.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    '''simple docstring'''

    # NOTE(review): mangled pydantic response model — the base should be
    # BaseModel and `_a = 4_2` a placeholder for an annotated field; given its
    # position, presumably the model-info payload — confirm against the
    # original file.
    _a = 4_2
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    '''simple docstring'''

    # NOTE(review): mangled pydantic response model with two placeholder
    # fields; the /tokenize endpoint below returns a model with `tokens` and
    # `tokens_ids` — presumably this one. Confirm against the original file.
    _a = 4_2
    _a = 4_2
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    '''simple docstring'''

    # NOTE(review): mangled pydantic response model — the /detokenize endpoint
    # below returns a model with `model` and `text` fields; presumably this
    # one. Confirm against the original file.
    _a = 4_2
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    '''simple docstring'''

    # NOTE(review): mangled pydantic response model — the /forward endpoint
    # below returns a model with `output` (and sometimes `attention`) fields;
    # presumably this one. Confirm against the original file.
    _a = 4_2
class __SCREAMING_SNAKE_CASE ( BaseTransformersCLICommand ):
    """Expose a transformers Pipeline over a REST API (FastAPI + uvicorn).

    NOTE(review): this class was machine-mangled — every method had been renamed
    to the same identifier (each shadowing the previous), several parameters
    were duplicated (SyntaxErrors), and the base class was an undefined name.
    Restored the method names the route table and ``run()`` call below actually
    reference, and the ``BaseTransformersCLICommand`` base imported at the top
    of the file.
    """

    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        """Register the 'serve' subcommand and its arguments on the CLI parser."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        # snake_case__ is the serve-command factory defined above in this module.
        serve_parser.set_defaults(func=snake_case__)

    def __init__( self, pipeline, host, port, workers ):
        """Store pipeline/server settings and build the FastAPI app.

        Raises RuntimeError when the optional serving dependencies are missing.
        """
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            # NOTE(review): the response_model arguments were undefined
            # placeholders; the Serve*Result classes are the response models the
            # endpoint bodies below already construct (their definitions above
            # were mangled — confirm the names resolve).
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run( self ):
        """Start the uvicorn server (blocking)."""
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info( self ):
        """GET /: expose the underlying model configuration."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize( self, text_input=Body(None, embed=True), return_ids=Body(False, embed=True) ):
        """POST /tokenize: tokenize `text_input`, optionally returning token ids too."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids=Body(None, embed=True),
        skip_special_tokens=Body(False, embed=True),
        cleanup_tokenization_spaces=Body(True, embed=True),
    ):
        """POST /detokenize: map token ids back to a string."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward( self, inputs=Body(None, embed=True) ):
        """POST /forward: run the pipeline on `inputs`; empty input yields an empty result."""
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
| 702 |
"""simple docstring"""
_lowercase : Optional[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.