code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
def A ( snake_case :Any ) -> List[Any]:
__UpperCamelCase = len(snake_case )
__UpperCamelCase = sum(snake_case )
__UpperCamelCase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__UpperCamelCase = True
for i in range(1 , s + 1 ):
__UpperCamelCase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__UpperCamelCase = dp[i][j - 1]
if arr[i - 1] <= j:
__UpperCamelCase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__UpperCamelCase = s - 2 * j
break
return diff
| 316
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 316
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A ( snake_case :Dict ) -> int:
__UpperCamelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def A ( snake_case :Union[str, Any] ) -> Union[str, Any]:
__UpperCamelCase = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
__UpperCamelCase = s_dict.pop(snake_case )
elif "subsample" in key:
__UpperCamelCase = s_dict.pop(snake_case )
def A ( snake_case :str ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase = emb.weight.shape
__UpperCamelCase = nn.Linear(snake_case , snake_case , bias=snake_case )
__UpperCamelCase = emb.weight.data
return lin_layer
def A ( snake_case :Optional[int] , snake_case :List[Any] ) -> Union[str, Any]:
__UpperCamelCase = torch.load(snake_case , map_location='cpu' )
__UpperCamelCase = mam_aaa['args']
__UpperCamelCase = mam_aaa['model']
__UpperCamelCase = state_dict['decoder.output_projection.weight']
remove_ignore_keys_(snake_case )
rename_keys(snake_case )
__UpperCamelCase = state_dict['decoder.embed_tokens.weight'].shape[0]
__UpperCamelCase = args.share_decoder_input_output_embed
__UpperCamelCase = [int(snake_case ) for i in args.conv_kernel_sizes.split(',' )]
__UpperCamelCase = SpeechaTextConfig(
vocab_size=snake_case , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(snake_case ) , conv_channels=args.conv_channels , conv_kernel_sizes=snake_case , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=snake_case , num_beams=5 , max_length=2_0_0 , use_cache=snake_case , decoder_start_token_id=2 , early_stopping=snake_case , )
__UpperCamelCase = SpeechaTextForConditionalGeneration(snake_case )
__UpperCamelCase , __UpperCamelCase = model.model.load_state_dict(snake_case , strict=snake_case )
if len(snake_case ) > 0 and not set(snake_case ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
f' but all the following weights are missing {missing}' )
if tie_embeds:
__UpperCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__UpperCamelCase = lm_head_weights
model.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase : Optional[int] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 316
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 316
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
from math import pow
def A ( snake_case :int , snake_case :int , snake_case :int , snake_case :int , snake_case :int , ) -> tuple[int, int]:
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
__UpperCamelCase = int(pow(snake_case , snake_case ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
__UpperCamelCase , __UpperCamelCase = backtrack(
snake_case , snake_case , current_number + 1 , snake_case , snake_case )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
__UpperCamelCase , __UpperCamelCase = backtrack(
snake_case , snake_case , current_number + 1 , snake_case , snake_case )
return current_sum, solutions_count
def A ( snake_case :int , snake_case :int ) -> int:
if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(snake_case , snake_case , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 316
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "switch_transformers"
lowercase = ["past_key_values"]
lowercase = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , __UpperCAmelCase=3_2128 , __UpperCAmelCase=768 , __UpperCAmelCase=64 , __UpperCAmelCase=2048 , __UpperCAmelCase=64 , __UpperCAmelCase=12 , __UpperCAmelCase=3 , __UpperCAmelCase=12 , __UpperCAmelCase=3 , __UpperCAmelCase=12 , __UpperCAmelCase=8 , __UpperCAmelCase=False , __UpperCAmelCase=0.0_1 , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=128 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=0.0_0_1 , __UpperCAmelCase=0.0_0_1 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = d_kv
__UpperCamelCase = d_ff
__UpperCamelCase = num_sparse_encoder_layers
__UpperCamelCase = num_layers
__UpperCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__UpperCamelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__UpperCamelCase = self.num_layers // self.num_sparse_encoder_layers
else:
__UpperCamelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__UpperCamelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__UpperCamelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
__UpperCamelCase = num_heads
__UpperCamelCase = num_experts
__UpperCamelCase = expert_capacity
__UpperCamelCase = router_bias
__UpperCamelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
__UpperCamelCase = router_dtype
__UpperCamelCase = router_ignore_padding_tokens
__UpperCamelCase = relative_attention_num_buckets
__UpperCamelCase = relative_attention_max_distance
__UpperCamelCase = dropout_rate
__UpperCamelCase = layer_norm_epsilon
__UpperCamelCase = initializer_factor
__UpperCamelCase = feed_forward_proj
__UpperCamelCase = use_cache
__UpperCamelCase = add_router_probs
__UpperCamelCase = router_z_loss_coef
__UpperCamelCase = router_aux_loss_coef
__UpperCamelCase = self.feed_forward_proj.split('-' )
__UpperCamelCase = act_info[-1]
__UpperCamelCase = act_info[0] == 'gated'
if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__UpperCamelCase = 'gelu_new'
super().__init__(
pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
| 316
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316
| 1
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def A ( snake_case :Optional[int] , snake_case :str ) -> str:
__UpperCamelCase = old_name
if "patch_embed" in old_name:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = old_name.split('.' )
if layer == "0":
__UpperCamelCase = old_name.replace('0' , 'convolution1' )
elif layer == "1":
__UpperCamelCase = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
__UpperCamelCase = old_name.replace('3' , 'convolution2' )
else:
__UpperCamelCase = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(r'\d\.\d' , snake_case ):
__UpperCamelCase = r'\b\d{2}\b'
if bool(re.search(snake_case , snake_case ) ):
__UpperCamelCase = re.search(r'\d\.\d\d.' , snake_case ).group()
else:
__UpperCamelCase = re.search(r'\d\.\d.' , snake_case ).group()
if int(match[0] ) < 6:
__UpperCamelCase = old_name.replace(snake_case , '' )
__UpperCamelCase = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
__UpperCamelCase = 'intermediate_stages.' + trimmed_name
else:
__UpperCamelCase = old_name.replace(snake_case , '' )
if int(match[2] ) < num_meta4D_last_stage:
__UpperCamelCase = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
__UpperCamelCase = str(int(match[2] ) - num_meta4D_last_stage )
__UpperCamelCase = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
__UpperCamelCase = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
__UpperCamelCase = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
__UpperCamelCase = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
__UpperCamelCase = trimmed_name.replace('fc2' , 'linear_out' )
__UpperCamelCase = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(r'.\d.' , snake_case ):
__UpperCamelCase = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
__UpperCamelCase = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__UpperCamelCase = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__UpperCamelCase = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
__UpperCamelCase = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
__UpperCamelCase = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
__UpperCamelCase = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
__UpperCamelCase = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__UpperCamelCase = new_name.replace('norm' , 'layernorm' )
__UpperCamelCase = 'efficientformer.' + new_name
else:
__UpperCamelCase = 'efficientformer.encoder.' + new_name
return new_name
def A ( snake_case :Dict , snake_case :str ) -> Tuple:
for key in checkpoint.copy().keys():
__UpperCamelCase = checkpoint.pop(snake_case )
__UpperCamelCase = val
return checkpoint
def A ( ) -> Union[str, Any]:
__UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return image
def A ( snake_case :Path , snake_case :Path , snake_case :Path , snake_case :bool ) -> Dict:
__UpperCamelCase = torch.load(snake_case , map_location='cpu' )['model']
__UpperCamelCase = EfficientFormerConfig.from_json_file(snake_case )
__UpperCamelCase = EfficientFormerForImageClassificationWithTeacher(snake_case )
__UpperCamelCase = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
__UpperCamelCase = config.depths[-1] - config.num_metaad_blocks + 1
__UpperCamelCase = convert_torch_checkpoint(snake_case , snake_case )
model.load_state_dict(snake_case )
model.eval()
__UpperCamelCase = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
__UpperCamelCase = prepare_img()
__UpperCamelCase = 2_5_6
__UpperCamelCase = 2_2_4
__UpperCamelCase = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
__UpperCamelCase = processor(images=snake_case , return_tensors='pt' ).pixel_values
# original processing pipeline
__UpperCamelCase = Compose(
[
Resize(snake_case , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(snake_case ),
ToTensor(),
Normalize(snake_case , snake_case ),
] )
__UpperCamelCase = image_transforms(snake_case ).unsqueeze(0 )
assert torch.allclose(snake_case , snake_case )
__UpperCamelCase = model(snake_case )
__UpperCamelCase = outputs.logits
__UpperCamelCase = (1, 1_0_0_0)
if "l1" in model_name:
__UpperCamelCase = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :1_0] , snake_case , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__UpperCamelCase = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :1_0] , snake_case , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__UpperCamelCase = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(snake_case ).mkdir(exist_ok=snake_case )
model.save_pretrained(snake_case )
print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(snake_case )
print(f'Processor successfuly saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=snake_case , )
processor.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=snake_case , )
if __name__ == "__main__":
UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 316
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = {}
def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = super().add_tokens(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
' `placeholder_token` that is not already in the tokenizer.' )
def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=1 , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
if num_vec_per_token == 1:
self.try_adding_tokens(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
output.append(__UpperCAmelCase )
else:
__UpperCamelCase = []
for i in range(__UpperCAmelCase ):
__UpperCamelCase = placeholder_token + F'_{i}'
self.try_adding_tokens(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
output.append(__UpperCAmelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
F' {placeholder_token}keep placeholder tokens independent' )
__UpperCamelCase = output
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1.0 ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = []
for i in range(len(__UpperCAmelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__UpperCAmelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__UpperCamelCase = self.token_map[placeholder_token]
__UpperCamelCase = tokens[: 1 + int(len(__UpperCAmelCase ) * prop_tokens_to_load )]
if vector_shuffle:
__UpperCamelCase = copy.copy(__UpperCAmelCase )
random.shuffle(__UpperCAmelCase )
__UpperCamelCase = text.replace(__UpperCAmelCase , ' '.join(__UpperCAmelCase ) )
return text
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1.0 , **__UpperCAmelCase ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
__UpperCAmelCase , vector_shuffle=__UpperCAmelCase , prop_tokens_to_load=__UpperCAmelCase ) , *__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1.0 , **__UpperCAmelCase ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
__UpperCAmelCase , vector_shuffle=__UpperCAmelCase , prop_tokens_to_load=__UpperCAmelCase ) , *__UpperCAmelCase , **__UpperCAmelCase , )
| 316
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316
| 1
|
"""simple docstring"""
import re
def A ( snake_case :str ) -> str:
if len(re.findall('[ATCG]' , snake_case ) ) != len(snake_case ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def A ( snake_case :float , snake_case :float , snake_case :float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
| 1
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def A ( snake_case :np.ndarray , snake_case :tuple[int, int] , snake_case :tuple[int, int] , snake_case :bool , ) -> tuple[float | int, list[tuple[int, int]]]:
__UpperCamelCase , __UpperCamelCase = grid.shape
__UpperCamelCase = [-1, 1, 0, 0]
__UpperCamelCase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__UpperCamelCase , __UpperCamelCase = [(0, source)], set()
__UpperCamelCase = np.full((rows, cols) , np.inf )
__UpperCamelCase = 0
__UpperCamelCase = np.empty((rows, cols) , dtype=snake_case )
__UpperCamelCase = None
while queue:
((__UpperCamelCase) , (__UpperCamelCase)) = heappop(snake_case )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__UpperCamelCase = []
while (x, y) != source:
path.append((x, y) )
__UpperCamelCase , __UpperCamelCase = predecessors[x, y]
path.append(snake_case ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(snake_case ) ):
__UpperCamelCase , __UpperCamelCase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__UpperCamelCase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(snake_case , (dist + 1, (nx, ny)) )
__UpperCamelCase = dist + 1
__UpperCamelCase = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 316
| 1
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
UpperCamelCase : Dict = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
UpperCamelCase : Union[str, Any] = concatenate_datasets
UpperCamelCase : Any = DownloadConfig
UpperCamelCase : List[Any] = DownloadManager
UpperCamelCase : Dict = DownloadMode
UpperCamelCase : Any = DownloadConfig
UpperCamelCase : Union[str, Any] = DownloadMode
UpperCamelCase : Any = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def A ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 316
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __lowerCAmelCase :
lowercase = PegasusConfig
lowercase = {}
lowercase = "gelu"
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=40 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = TFPegasusModel(config=__UpperCAmelCase ).get_decoder()
__UpperCamelCase = inputs_dict['input_ids']
__UpperCamelCase = input_ids[:1, :]
__UpperCamelCase = inputs_dict['attention_mask'][:1, :]
__UpperCamelCase = inputs_dict['head_mask']
__UpperCamelCase = 1
# first forward pass
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
def A ( snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[Any]=None , snake_case :Dict=None , snake_case :Tuple=None , snake_case :Dict=None , snake_case :Any=None , ) -> str:
if attention_mask is None:
__UpperCamelCase = tf.cast(tf.math.not_equal(snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowercase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowercase = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowercase = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
lowercase = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowercase = "google/pegasus-xsum"
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.translate_src_text(**__UpperCAmelCase )
assert self.expected_text == generated_words
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='tf' )
__UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , )
__UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase )
return generated_words
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :int ) -> bool:
__UpperCamelCase = len(snake_case )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = CycleDiffusionPipeline
lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCamelCase = CLIPTextModel(__UpperCAmelCase )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = CycleDiffusionPipeline(**__UpperCAmelCase )
__UpperCamelCase = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__UpperCamelCase = pipe(**__UpperCAmelCase )
__UpperCamelCase = output.images
__UpperCamelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCamelCase = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(__UpperCAmelCase , 'half' ):
__UpperCamelCase = module.half()
__UpperCamelCase = CycleDiffusionPipeline(**__UpperCAmelCase )
__UpperCamelCase = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__UpperCamelCase = pipe(**__UpperCAmelCase )
__UpperCamelCase = output.images
__UpperCamelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCamelCase = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__UpperCamelCase = init_image.resize((512, 512) )
__UpperCamelCase = 'CompVis/stable-diffusion-v1-4'
__UpperCamelCase = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder='scheduler' )
__UpperCamelCase = CycleDiffusionPipeline.from_pretrained(
__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase = 'A black colored car'
__UpperCamelCase = 'A blue colored car'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type='np' , )
__UpperCamelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__UpperCamelCase = init_image.resize((512, 512) )
__UpperCamelCase = 'CompVis/stable-diffusion-v1-4'
__UpperCamelCase = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder='scheduler' )
__UpperCamelCase = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase = 'A black colored car'
__UpperCamelCase = 'A blue colored car'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type='np' , )
__UpperCamelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 316
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCamelCase : int = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCamelCase : Optional[Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCamelCase : str = sorted(arg_to_scheduler.keys())
UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def A ( snake_case :Any , snake_case :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=snake_case , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
__UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(snake_case )
if logging_callback is None:
__UpperCamelCase = LoggingCallback()
__UpperCamelCase = {}
if args.fpaa:
__UpperCamelCase = 1_6
if args.gpus > 1:
__UpperCamelCase = 'auto'
__UpperCamelCase = 'ddp'
__UpperCamelCase = args.accumulate_grad_batches
__UpperCamelCase = None
__UpperCamelCase = 'auto'
__UpperCamelCase = pl.Trainer.from_argparse_args(
snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , )
if args.do_train:
trainer.fit(snake_case )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
__UpperCamelCase = None
def A ( ) -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
__UpperCamelCase = input('Enter the value of the root node: ' ).strip().lower()
__UpperCamelCase = queue.Queue()
__UpperCamelCase = TreeNode(int(snake_case ) )
q.put(snake_case )
while not q.empty():
__UpperCamelCase = q.get()
__UpperCamelCase = f'Enter the left node of {node_found.data}: '
__UpperCamelCase = input(snake_case ).strip().lower() or 'n'
if check == "n":
return tree_node
__UpperCamelCase = TreeNode(int(snake_case ) )
__UpperCamelCase = left_node
q.put(snake_case )
__UpperCamelCase = f'Enter the right node of {node_found.data}: '
__UpperCamelCase = input(snake_case ).strip().lower() or 'n'
if check == "n":
return tree_node
__UpperCamelCase = TreeNode(int(snake_case ) )
__UpperCamelCase = right_node
q.put(snake_case )
raise
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
__UpperCamelCase = queue.Queue()
q.put(snake_case )
while not q.empty():
__UpperCamelCase = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
__UpperCamelCase = queue.Queue()
q.put(snake_case )
while not q.empty():
__UpperCamelCase = []
while not q.empty():
__UpperCamelCase = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case )
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
__UpperCamelCase = []
__UpperCamelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(snake_case )
__UpperCamelCase = n.left
# end of while means current node doesn't have left child
__UpperCamelCase = stack.pop()
# start to traverse its right child
__UpperCamelCase = n.right
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
__UpperCamelCase = []
__UpperCamelCase = node
while n or stack:
while n:
stack.append(snake_case )
__UpperCamelCase = n.left
__UpperCamelCase = stack.pop()
print(n.data , end=',' )
__UpperCamelCase = n.right
def A ( snake_case :TreeNode ) -> None:
if not isinstance(snake_case , snake_case ) or not node:
return
__UpperCamelCase , __UpperCamelCase = [], []
__UpperCamelCase = node
stacka.append(snake_case )
while stacka: # to find the reversed order of post order, store it in stack2
__UpperCamelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def A ( snake_case :str = "" , snake_case :str=5_0 , snake_case :Any="*" ) -> str:
if not s:
return "\n" + width * char
__UpperCamelCase , __UpperCamelCase = divmod(width - len(snake_case ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
UpperCamelCase : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 316
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def A ( snake_case :int ) -> List[Any]:
__UpperCamelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def A ( snake_case :Dict ) -> Optional[Any]:
__UpperCamelCase , __UpperCamelCase = emb.weight.shape
__UpperCamelCase = nn.Linear(snake_case , snake_case , bias=snake_case )
__UpperCamelCase = emb.weight.data
return lin_layer
def A ( snake_case :Optional[Any] , snake_case :Optional[int]="facebook/mbart-large-en-ro" , snake_case :List[str]=False , snake_case :Optional[Any]=False ) -> Optional[Any]:
__UpperCamelCase = torch.load(snake_case , map_location='cpu' )['model']
remove_ignore_keys_(snake_case )
__UpperCamelCase = state_dict['encoder.embed_tokens.weight'].shape[0]
__UpperCamelCase = MBartConfig.from_pretrained(snake_case , vocab_size=snake_case )
if mbart_aa and finetuned:
__UpperCamelCase = 'relu'
__UpperCamelCase = state_dict['decoder.embed_tokens.weight']
__UpperCamelCase = MBartForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case )
if finetuned:
__UpperCamelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
UpperCamelCase : Optional[int] = parser.parse_args()
UpperCamelCase : Tuple = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 316
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str:
output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , )
else:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , )
@torch.no_grad()
def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]:
__UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCamelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__UpperCamelCase = 'cpu'
__UpperCamelCase = Path(snake_case )
# VAE DECODER
__UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
__UpperCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCamelCase = vae_decoder.decode
onnx_export(
snake_case , model_args=(
torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=snake_case , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : str = "▁"
UpperCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCamelCase : Optional[Any] = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCamelCase : List[Any] = {
"xlm-roberta-base": 5_1_2,
"xlm-roberta-large": 5_1_2,
"xlm-roberta-large-finetuned-conll02-dutch": 5_1_2,
"xlm-roberta-large-finetuned-conll02-spanish": 5_1_2,
"xlm-roberta-large-finetuned-conll03-english": 5_1_2,
"xlm-roberta-large-finetuned-conll03-german": 5_1_2,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
__UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCamelCase = 1
__UpperCamelCase = len(self.sp_model ) + self.fairseq_offset
__UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
__UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
__UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = ''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ' ' ).strip()
return out_string
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 316
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
def A ( snake_case :list[int] ) -> list[str]:
__UpperCamelCase = []
for key in product(snake_case , repeat=3 ):
__UpperCamelCase = try_key(snake_case , snake_case )
if encoded is not None:
possibles.append(snake_case )
return possibles
def A ( snake_case :list[str] , snake_case :str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A ( snake_case :str = "p059_cipher.txt" ) -> int:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
__UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
__UpperCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
__UpperCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
__UpperCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
| 1
|
"""simple docstring"""
from math import pi, sqrt
def A ( snake_case :float ) -> float:
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(snake_case )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def A ( ) -> None:
assert gamma(0.5 ) == sqrt(snake_case )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase : Optional[int] = 1.0
while num:
UpperCamelCase : List[str] = float(input("Gamma of: "))
print(f'''gamma({num}) = {gamma(num)}''')
print("\nEnter 0 to exit...")
| 316
|
"""simple docstring"""
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( snake_case :float , snake_case :str , snake_case :str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(snake_case )}'
)
raise ValueError(snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def UpperCAmelCase ( __UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
| 316
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = IFInpaintingSuperResolutionPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :str , snake_case :int ) -> list:
__UpperCamelCase = word.split()
def justify(snake_case :list , snake_case :int , snake_case :int ) -> str:
__UpperCamelCase = max_width - width
__UpperCamelCase = len(snake_case )
if len(snake_case ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__UpperCamelCase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__UpperCamelCase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__UpperCamelCase = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(snake_case ):
num_spaces_between_words_list[i] += 1
__UpperCamelCase = []
for i in range(snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(snake_case )
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = 0
for word in words:
if width + len(snake_case ) + len(snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(snake_case )
width += len(snake_case )
else:
# justify the line and add it to result
answer.append(justify(snake_case , snake_case , snake_case ) )
# reset new line and new width
__UpperCamelCase , __UpperCamelCase = [word], len(snake_case )
__UpperCamelCase = max_width - width - len(snake_case )
answer.append(' '.join(snake_case ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 316
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = GPTaTokenizer
lowercase = GPTaTokenizerFast
lowercase = True
lowercase = {"add_prefix_space": True}
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__UpperCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase = {'unk_token': '<unk>'}
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCAmelCase ) )
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase = 'lower newer'
__UpperCamelCase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__UpperCamelCase = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = tokens + [tokenizer.unk_token]
__UpperCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase )
__UpperCamelCase = 'lower newer'
# Testing tokenization
__UpperCamelCase = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
__UpperCamelCase = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids without special tokens
__UpperCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
__UpperCamelCase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids with special tokens
__UpperCamelCase = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase )
__UpperCamelCase = tokenizer.encode(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
__UpperCamelCase = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing the unknown token
__UpperCamelCase = tokens + [rust_tokenizer.unk_token]
__UpperCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# Simple input
__UpperCamelCase = 'This is a simple input'
__UpperCamelCase = ['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase = ('This is a simple input', 'This is a pair')
__UpperCamelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__UpperCamelCase = 'This is a simple input'
__UpperCamelCase = ['This is a simple input looooooooong', 'This is a simple input']
__UpperCamelCase = ('This is a simple input', 'This is a pair')
__UpperCamelCase = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__UpperCamelCase = tokenizer.pad_token_id
__UpperCamelCase = tokenizer(__UpperCAmelCase , padding='max_length' , max_length=30 , return_tensors='np' )
__UpperCamelCase = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors='np' )
__UpperCamelCase = tokenizer(*__UpperCAmelCase , padding='max_length' , max_length=60 , return_tensors='np' )
__UpperCamelCase = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = '$$$'
__UpperCamelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__UpperCAmelCase , add_bos_token=__UpperCAmelCase )
__UpperCamelCase = 'This is a simple input'
__UpperCamelCase = ['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase = tokenizer.bos_token_id
__UpperCamelCase = tokenizer(__UpperCAmelCase )
__UpperCamelCase = tokenizer(__UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , __UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__UpperCamelCase = tokenizer.decode(out_s.input_ids )
__UpperCamelCase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = [self.get_tokenizer(do_lower_case=__UpperCAmelCase , add_bos_token=__UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__UpperCamelCase = 'Encode this.'
__UpperCamelCase = 'This one too please.'
__UpperCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
encoded_sequence += tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__UpperCamelCase = tokenizer.encode_plus(
__UpperCAmelCase , __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , )
__UpperCamelCase = encoded_sequence_dict['input_ids']
__UpperCamelCase = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
__UpperCamelCase = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__UpperCAmelCase )
]
__UpperCamelCase = [x for x in filtered_sequence if x is not None]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__UpperCAmelCase )
__UpperCamelCase = 'A photo of a cat'
__UpperCamelCase = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
__UpperCamelCase = AutoTokenizer.from_pretrained('./test_opt' )
__UpperCamelCase = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=__UpperCAmelCase )
__UpperCamelCase = 'A photo of a cat'
__UpperCamelCase = tokenizer.encode(
__UpperCAmelCase , )
# Same as above
self.assertEqual(__UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__UpperCAmelCase )
__UpperCamelCase = 'bos'
__UpperCamelCase = tokenizer.get_vocab()['bos']
__UpperCamelCase = 'A photo of a cat'
__UpperCamelCase = tokenizer.encode(
__UpperCAmelCase , )
# We changed the bos token
self.assertEqual(__UpperCAmelCase , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
__UpperCamelCase = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__UpperCamelCase = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [3_1957, 250, 1345, 9, 10, 4758] )
| 316
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 316
| 1
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = (UnCLIPScheduler,)
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**__UpperCAmelCase )
return config
def UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__UpperCAmelCase , prev_timestep=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(variance_type='fixed_small_log' )
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(variance_type='learned_range' )
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
__UpperCamelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=__UpperCAmelCase ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__UpperCAmelCase ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__UpperCAmelCase ) - -0.0_0_1_0_0_1_1 < 1E-5
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
__UpperCamelCase = scheduler.timesteps
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(__UpperCAmelCase ):
# 1. predict noise residual
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(__UpperCAmelCase ) )
__UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(25 )
__UpperCamelCase = scheduler.timesteps
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(__UpperCAmelCase ):
# 1. predict noise residual
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
if i + 1 == timesteps.shape[0]:
__UpperCamelCase = None
else:
__UpperCamelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prev_timestep=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(__UpperCAmelCase ) )
__UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 316
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def A ( snake_case :Sequence[float] , snake_case :int , snake_case :int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__UpperCamelCase = (low + high) // 2
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = max_subarray(snake_case , snake_case , snake_case )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = max_subarray(snake_case , mid + 1 , snake_case )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = max_cross_sum(snake_case , snake_case , snake_case , snake_case )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def A ( snake_case :Sequence[float] , snake_case :int , snake_case :int , snake_case :int ) -> tuple[int, int, float]:
__UpperCamelCase , __UpperCamelCase = float('-inf' ), -1
__UpperCamelCase , __UpperCamelCase = float('-inf' ), -1
__UpperCamelCase = 0
for i in range(snake_case , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__UpperCamelCase = summ
__UpperCamelCase = i
__UpperCamelCase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__UpperCamelCase = summ
__UpperCamelCase = i
return max_left, max_right, (left_sum + right_sum)
def A ( snake_case :int ) -> float:
__UpperCamelCase = [randint(1 , snake_case ) for _ in range(snake_case )]
__UpperCamelCase = time.time()
max_subarray(snake_case , 0 , input_size - 1 )
__UpperCamelCase = time.time()
return end - start
def A ( ) -> None:
__UpperCamelCase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__UpperCamelCase = [time_max_subarray(snake_case ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(snake_case , snake_case ):
print(snake_case , '\t\t' , snake_case )
plt.plot(snake_case , snake_case )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 316
| 1
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0.0 , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = "layer_norm" , __UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = only_cross_attention
__UpperCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
__UpperCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__UpperCamelCase = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
__UpperCamelCase = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase )
else:
__UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
__UpperCamelCase = Attention(
query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__UpperCamelCase = (
AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
)
__UpperCamelCase = Attention(
query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
__UpperCamelCase = None
__UpperCamelCase = None
# 3. Feed-forward
__UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
__UpperCamelCase = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase )
# let chunk size default to None
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = chunk_size
__UpperCamelCase = dim
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
__UpperCamelCase = self.norma(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.norma(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype )
else:
__UpperCamelCase = self.norma(__UpperCAmelCase )
__UpperCamelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__UpperCamelCase = self.attna(
__UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
if self.use_ada_layer_norm_zero:
__UpperCamelCase = gate_msa.unsqueeze(1 ) * attn_output
__UpperCamelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__UpperCamelCase = (
self.norma(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase )
)
__UpperCamelCase = self.attna(
__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = attn_output + hidden_states
# 3. Feed-forward
__UpperCamelCase = self.norma(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
__UpperCamelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
__UpperCamelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__UpperCamelCase = torch.cat(
[self.ff(__UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__UpperCamelCase = self.ff(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
__UpperCamelCase = gate_mlp.unsqueeze(1 ) * ff_output
__UpperCamelCase = ff_output + hidden_states
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "geglu" , __UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = int(dim * mult )
__UpperCamelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__UpperCamelCase = GELU(__UpperCAmelCase , __UpperCAmelCase )
if activation_fn == "gelu-approximate":
__UpperCamelCase = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate='tanh' )
elif activation_fn == "geglu":
__UpperCamelCase = GEGLU(__UpperCAmelCase , __UpperCAmelCase )
elif activation_fn == "geglu-approximate":
__UpperCamelCase = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = nn.ModuleList([] )
# project in
self.net.append(__UpperCAmelCase )
# project dropout
self.net.append(nn.Dropout(__UpperCAmelCase ) )
# project out
self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__UpperCAmelCase ) )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
for module in self.net:
__UpperCamelCase = module(__UpperCAmelCase )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "none" ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = approximate
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.proj(__UpperCAmelCase )
__UpperCamelCase = self.gelu(__UpperCAmelCase )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Linear(__UpperCAmelCase , dim_out * 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.proj(__UpperCAmelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__UpperCAmelCase )
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.proj(__UpperCAmelCase )
return x * torch.sigmoid(1.7_0_2 * x )
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = nn.SiLU()
__UpperCamelCase = nn.Linear(__UpperCAmelCase , embedding_dim * 2 )
__UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.linear(self.silu(self.emb(__UpperCAmelCase ) ) )
__UpperCamelCase , __UpperCamelCase = torch.chunk(__UpperCAmelCase , 2 )
__UpperCamelCase = self.norm(__UpperCAmelCase ) * (1 + scale) + shift
return x
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = nn.SiLU()
__UpperCamelCase = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase )
__UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1E-6 )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__UpperCamelCase = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase ) ) )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = emb.chunk(6 , dim=1 )
__UpperCamelCase = self.norm(__UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 1E-5 ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = num_groups
__UpperCamelCase = eps
if act_fn is None:
__UpperCamelCase = None
else:
__UpperCamelCase = get_activation(__UpperCAmelCase )
__UpperCamelCase = nn.Linear(__UpperCAmelCase , out_dim * 2 )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self.act:
__UpperCamelCase = self.act(__UpperCAmelCase )
__UpperCamelCase = self.linear(__UpperCAmelCase )
__UpperCamelCase = emb[:, :, None, None]
__UpperCamelCase , __UpperCamelCase = emb.chunk(2 , dim=1 )
__UpperCamelCase = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps )
__UpperCamelCase = x * (1 + scale) + shift
return x
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = load_tool('text-classification' )
self.tool.setup()
__UpperCamelCase = load_tool('text-classification' , remote=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(__UpperCAmelCase , 'positive' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(__UpperCAmelCase , 'positive' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(__UpperCAmelCase , 'positive' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(__UpperCAmelCase , 'positive' )
| 316
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 316
|
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(snake_case , snake_case ):
raise TypeError('Input value must be a \'int\' type' )
return bin(snake_case ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316
| 1
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCamelCase : Union[str, Any] = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def A ( snake_case :List[str] , snake_case :str , snake_case :List[str] , snake_case :str , snake_case :Optional[Any] , snake_case :Optional[Any] ) -> List[Any]:
if got_ver is None or want_ver is None:
raise ValueError(
f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
f' reinstalling {pkg}.' )
if not ops[op](version.parse(snake_case ) , version.parse(snake_case ) ):
raise ImportError(
f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def A ( snake_case :str , snake_case :Optional[str] = None ) -> None:
__UpperCamelCase = f'\n{hint}' if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' , snake_case ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = requirement, None, None
else:
__UpperCamelCase = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , snake_case )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
f' got {requirement}' )
__UpperCamelCase , __UpperCamelCase = match[0]
__UpperCamelCase = want_full.split(',' ) # there could be multiple requirements
__UpperCamelCase = {}
for w in want_range:
__UpperCamelCase = re.findall(r'^([\s!=<>]{1,2})(.+)' , snake_case )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
f' but got {requirement}' )
__UpperCamelCase , __UpperCamelCase = match[0]
__UpperCamelCase = want_ver
if op not in ops:
raise ValueError(f'{requirement}: need one of {list(ops.keys() )}, but got {op}' )
# special case
if pkg == "python":
__UpperCamelCase = '.'.join([str(snake_case ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
return
# check if any version is installed
try:
__UpperCamelCase = importlib.metadata.version(snake_case )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
def A ( snake_case :Union[str, Any] ) -> Any:
__UpperCamelCase = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(snake_case , snake_case )
| 316
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316
| 1
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
UpperCamelCase , UpperCamelCase : Optional[int] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
UpperCamelCase : Dict = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
UpperCamelCase : Dict = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
UpperCamelCase : Tuple = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def A ( snake_case :Optional[Any]=None , snake_case :Union[str, Any]=None ) -> Dict:
return field(default_factory=lambda: default , metadata=snake_case )
@dataclass
class __lowerCAmelCase :
lowercase = field(
metadata={"help": "The csv file to plot."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Disable logarithmic scale when plotting"} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
lowercase = list_field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def A ( snake_case :Tuple ) -> List[Any]:
try:
int(snake_case )
return True
except ValueError:
return False
def A ( snake_case :str ) -> List[Any]:
try:
float(snake_case )
return True
except ValueError:
return False
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = args
__UpperCamelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__UpperCamelCase = csv.DictReader(__UpperCAmelCase )
for row in reader:
__UpperCamelCase = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__UpperCamelCase = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__UpperCamelCase = float(row['result'] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = plt.subplots()
__UpperCamelCase = 'Time usage' if self.args.is_time else 'Memory usage'
__UpperCamelCase = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__UpperCamelCase = sorted(set(self.result_dict[model_name]['bsz'] ) )
__UpperCamelCase = sorted(set(self.result_dict[model_name]['seq_len'] ) )
__UpperCamelCase = self.result_dict[model_name]['result']
((__UpperCamelCase) , (__UpperCamelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__UpperCamelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__UpperCamelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__UpperCAmelCase , )
else:
__UpperCamelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__UpperCamelCase) , (__UpperCamelCase)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__UpperCamelCase = np.asarray(__UpperCAmelCase , __UpperCAmelCase )[: len(__UpperCAmelCase )]
plt.scatter(
__UpperCAmelCase , __UpperCAmelCase , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(__UpperCAmelCase , __UpperCAmelCase , '--' )
title_str += F' {label_model_name} vs.'
__UpperCamelCase = title_str[:-4]
__UpperCamelCase = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(__UpperCAmelCase )
plt.xlabel(__UpperCAmelCase )
plt.ylabel(__UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def A ( ) -> Tuple:
__UpperCamelCase = HfArgumentParser(snake_case )
__UpperCamelCase = parser.parse_args_into_dataclasses()[0]
__UpperCamelCase = Plot(args=snake_case )
plot.plot()
if __name__ == "__main__":
main()
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=False , ):
'''simple docstring'''
__UpperCamelCase = size if size is not None else {'height': 20, 'width': 20}
__UpperCamelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean
__UpperCamelCase = image_std
__UpperCamelCase = do_reduce_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A ( ) -> str:
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(dataset[0]['file'] )
__UpperCamelCase = Image.open(dataset[1]['file'] )
return image, map
def A ( ) -> Union[str, Any]:
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = Image.open(ds[1]['file'] )
__UpperCamelCase = Image.open(ds[2]['file'] )
__UpperCamelCase = Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitImageProcessingTester(self )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase )
__UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
__UpperCamelCase = []
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
__UpperCamelCase , __UpperCamelCase = prepare_semantic_single_inputs()
__UpperCamelCase = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
__UpperCamelCase , __UpperCamelCase = prepare_semantic_batch_inputs()
__UpperCamelCase = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__UpperCamelCase , __UpperCamelCase = prepare_semantic_single_inputs()
__UpperCamelCase = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 150 )
__UpperCamelCase = True
__UpperCamelCase = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
def A ( snake_case :list[int] , snake_case :int ) -> bool:
if len(snake_case ) == 0:
return False
__UpperCamelCase = len(snake_case ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , snake_case )
else:
return binary_search(a_list[midpoint + 1 :] , snake_case )
if __name__ == "__main__":
UpperCamelCase : List[Any] = input("Enter numbers separated by comma:\n").strip()
UpperCamelCase : int = [int(item.strip()) for item in user_input.split(",")]
UpperCamelCase : Optional[Any] = int(input("Enter the number to be found in the list:\n").strip())
UpperCamelCase : Dict = "" if binary_search(sequence, target) else "not "
print(f'''{target} was {not_str}found in {sequence}''')
| 316
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 316
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , ):
'''simple docstring'''
__UpperCamelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean
__UpperCamelCase = image_std
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
def UpperCAmelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
if not batched:
__UpperCamelCase = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image ):
__UpperCamelCase , __UpperCamelCase = image.size
else:
__UpperCamelCase , __UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
__UpperCamelCase = int(self.size['shortest_edge'] * h / w )
__UpperCamelCase = self.size['shortest_edge']
elif w > h:
__UpperCamelCase = self.size['shortest_edge']
__UpperCamelCase = int(self.size['shortest_edge'] * w / h )
else:
__UpperCamelCase = self.size['shortest_edge']
__UpperCamelCase = self.size['shortest_edge']
else:
__UpperCamelCase = []
for image in image_inputs:
__UpperCamelCase , __UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCamelCase = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
__UpperCamelCase = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = DeformableDetrImageProcessingTester(self )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
__UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__UpperCamelCase = json.loads(f.read() )
__UpperCamelCase = {'image_id': 3_9769, 'annotations': target}
# encode them
__UpperCamelCase = DeformableDetrImageProcessor()
__UpperCamelCase = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
__UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
# verify boxes
__UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
# verify is_crowd
__UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
# verify class_labels
__UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
# verify orig_size
__UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
# verify size
__UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__UpperCamelCase = json.loads(f.read() )
__UpperCamelCase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__UpperCamelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__UpperCamelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
__UpperCamelCase = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
__UpperCamelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
# verify boxes
__UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
# verify is_crowd
__UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
# verify class_labels
__UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
# verify masks
__UpperCamelCase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase )
# verify orig_size
__UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
# verify size
__UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :int ) -> bool:
__UpperCamelCase = len(snake_case )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["vqvae"]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , mel=__UpperCAmelCase , vqvae=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler , __UpperCAmelCase ) else 1000
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=True , ):
'''simple docstring'''
__UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__UpperCAmelCase )
__UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__UpperCAmelCase , device=self.device , )
__UpperCamelCase = noise
__UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = self.mel.audio_slice_to_image(__UpperCAmelCase )
__UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__UpperCamelCase = (input_image / 255) * 2 - 1
__UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=__UpperCAmelCase )[0]
__UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__UpperCamelCase = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__UpperCamelCase = int(mask_start_secs * pixels_per_second )
__UpperCamelCase = int(mask_end_secs * pixels_per_second )
__UpperCamelCase = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __UpperCAmelCase ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )['sample']
else:
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase )['sample']
if isinstance(self.scheduler , __UpperCAmelCase ):
__UpperCamelCase = self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )['prev_sample']
else:
__UpperCamelCase = self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
__UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
__UpperCamelCase = self.vqvae.decode(__UpperCAmelCase )['sample']
__UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
__UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__UpperCamelCase = (images * 255).round().astype('uint8' )
__UpperCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__UpperCamelCase = [self.mel.image_to_audio(__UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__UpperCAmelCase ) )
@torch.no_grad()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , __UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase )
__UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__UpperCamelCase = (sample / 255) * 2 - 1
__UpperCamelCase = torch.Tensor(__UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__UpperCamelCase = self.scheduler.alphas_cumprod[t]
__UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__UpperCamelCase = 1 - alpha_prod_t
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase )['sample']
__UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = acos(torch.dot(torch.flatten(__UpperCAmelCase ) , torch.flatten(__UpperCAmelCase ) ) / torch.norm(__UpperCAmelCase ) / torch.norm(__UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(__UpperCAmelCase )
| 316
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCamelCase : int = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCamelCase : Optional[Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCamelCase : str = sorted(arg_to_scheduler.keys())
UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def A ( snake_case :Any , snake_case :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=snake_case , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
__UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(snake_case )
if logging_callback is None:
__UpperCamelCase = LoggingCallback()
__UpperCamelCase = {}
if args.fpaa:
__UpperCamelCase = 1_6
if args.gpus > 1:
__UpperCamelCase = 'auto'
__UpperCamelCase = 'ddp'
__UpperCamelCase = args.accumulate_grad_batches
__UpperCamelCase = None
__UpperCamelCase = 'auto'
__UpperCamelCase = pl.Trainer.from_argparse_args(
snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , )
if args.do_train:
trainer.fit(snake_case )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :Optional[Any] , snake_case :List[Any] ) -> str:
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(snake_case ):
for j in range(snake_case ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def A ( snake_case :List[Any] , snake_case :str ) -> List[str]:
__UpperCamelCase = [[float('inf' ) for _ in range(snake_case )] for _ in range(snake_case )]
for i in range(snake_case ):
for j in range(snake_case ):
__UpperCamelCase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(snake_case ):
# looping through rows of graph array
for i in range(snake_case ):
# looping through columns of graph array
for j in range(snake_case ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__UpperCamelCase = dist[i][k] + dist[k][j]
_print_dist(snake_case , snake_case )
return dist, v
if __name__ == "__main__":
UpperCamelCase : int = int(input("Enter number of vertices: "))
UpperCamelCase : Any = int(input("Enter number of edges: "))
UpperCamelCase : List[Any] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
UpperCamelCase : Tuple = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
UpperCamelCase : Optional[Any] = int(input("Enter source:"))
UpperCamelCase : List[str] = int(input("Enter destination:"))
UpperCamelCase : Optional[int] = float(input("Enter weight:"))
UpperCamelCase : List[Any] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 316
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A ( snake_case :list[list[float]] ) -> list[list[float]]:
__UpperCamelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__UpperCamelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
__UpperCamelCase = [[0.0, 0.0], [0.0, 0.0]]
__UpperCamelCase , __UpperCamelCase = matrix[1][1], matrix[0][0]
__UpperCamelCase , __UpperCamelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(snake_case ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__UpperCamelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
__UpperCamelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__UpperCamelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__UpperCamelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__UpperCamelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__UpperCamelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__UpperCamelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__UpperCamelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__UpperCamelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__UpperCamelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__UpperCamelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__UpperCamelCase = array(snake_case )
for i in range(3 ):
for j in range(3 ):
__UpperCamelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__UpperCamelCase = array(snake_case )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case )
# Calculate the inverse of the matrix
return [[float(d(snake_case ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 316
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str:
output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , )
else:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , )
@torch.no_grad()
def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]:
__UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCamelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__UpperCamelCase = 'cpu'
__UpperCamelCase = Path(snake_case )
# VAE DECODER
__UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
__UpperCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCamelCase = vae_decoder.decode
onnx_export(
snake_case , model_args=(
torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=snake_case , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316
| 1
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
def A ( snake_case :list[int] ) -> list[str]:
__UpperCamelCase = []
for key in product(snake_case , repeat=3 ):
__UpperCamelCase = try_key(snake_case , snake_case )
if encoded is not None:
possibles.append(snake_case )
return possibles
def A ( snake_case :list[str] , snake_case :str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A ( snake_case :str = "p059_cipher.txt" ) -> int:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
__UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
__UpperCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
__UpperCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
__UpperCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Dict = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase = PegasusTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = '</s>'
__UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(__UpperCAmelCase ) , 1103 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
__UpperCamelCase = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
__UpperCamelCase = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCamelCase = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
__UpperCamelCase = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
__UpperCamelCase = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
__UpperCamelCase = 'To ensure a smooth flow of bank resolutions.'
__UpperCamelCase = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
__UpperCamelCase = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ['This is going to be way too long.' * 150, 'short example']
__UpperCamelCase = ['not super long but more than 5 tokens', 'tiny']
__UpperCamelCase = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='pt' )
__UpperCamelCase = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
__UpperCamelCase = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
__UpperCamelCase = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_torch
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ['This is going to be way too long.' * 1000, 'short example']
__UpperCamelCase = ['not super long but more than 5 tokens', 'tiny']
__UpperCamelCase = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='pt' )
__UpperCamelCase = self._large_tokenizer(
text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
__UpperCamelCase = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 316
|
"""simple docstring"""
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( snake_case :float , snake_case :str , snake_case :str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(snake_case )}'
)
raise ValueError(snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def A ( snake_case :int ) -> Optional[int]:
__UpperCamelCase = checkpoints.load_tax_checkpoint(snake_case )
__UpperCamelCase = flatten_dict(snake_case )
return flax_params
def A ( snake_case :List[str] ) -> Tuple:
__UpperCamelCase = {}
__UpperCamelCase = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase = new_key.replace(snake_case , snake_case )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase = new_key.replace(snake_case , snake_case )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase = re.sub(r'layers_(\d+)' , r'layer.\1' , snake_case )
__UpperCamelCase = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase = re.sub(r'layers_(\d+)' , r'layer.\1' , snake_case )
__UpperCamelCase = flax_dict[key]
__UpperCamelCase = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase = torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def A ( snake_case :int , snake_case :List[Any] , snake_case :Optional[Any]=False , snake_case :str=False ) -> Dict:
__UpperCamelCase = get_flax_param(snake_case )
if not use_large:
__UpperCamelCase = PixaStructVisionConfig()
__UpperCamelCase = PixaStructTextConfig()
else:
__UpperCamelCase = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
__UpperCamelCase = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
__UpperCamelCase = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=snake_case )
__UpperCamelCase = PixaStructForConditionalGeneration(snake_case )
__UpperCamelCase = rename_and_convert_flax_params(snake_case )
model.load_state_dict(snake_case )
__UpperCamelCase = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase = PixaStructImageProcessor()
__UpperCamelCase = PixaStructProcessor(image_processor=snake_case , tokenizer=snake_case )
if use_large:
__UpperCamelCase = 4_0_9_6
__UpperCamelCase = True
# mkdir if needed
os.makedirs(snake_case , exist_ok=snake_case )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
print('Model saved in {}'.format(snake_case ) )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 316
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = IFInpaintingSuperResolutionPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def A ( snake_case :list , snake_case :list ) -> list:
if len(snake_case ) != 2 or len(a[0] ) != 2 or len(snake_case ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__UpperCamelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def A ( snake_case :list , snake_case :list ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(snake_case ) )
]
def A ( snake_case :list , snake_case :list ) -> Tuple:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(snake_case ) )
]
def A ( snake_case :list ) -> tuple[list, list, list, list]:
if len(snake_case ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__UpperCamelCase = len(snake_case )
__UpperCamelCase = matrix_length // 2
__UpperCamelCase = [[a[i][j] for j in range(snake_case , snake_case )] for i in range(snake_case )]
__UpperCamelCase = [
[a[i][j] for j in range(snake_case , snake_case )] for i in range(snake_case , snake_case )
]
__UpperCamelCase = [[a[i][j] for j in range(snake_case )] for i in range(snake_case )]
__UpperCamelCase = [[a[i][j] for j in range(snake_case )] for i in range(snake_case , snake_case )]
return top_left, top_right, bot_left, bot_right
def A ( snake_case :list ) -> tuple[int, int]:
return len(snake_case ), len(matrix[0] )
def A ( snake_case :list ) -> None:
print('\n'.join(str(snake_case ) for line in matrix ) )
def A ( snake_case :list , snake_case :list ) -> list:
if matrix_dimensions(snake_case ) == (2, 2):
return default_matrix_multiplication(snake_case , snake_case )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = split_matrix(snake_case )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = split_matrix(snake_case )
__UpperCamelCase = actual_strassen(snake_case , matrix_subtraction(snake_case , snake_case ) )
__UpperCamelCase = actual_strassen(matrix_addition(snake_case , snake_case ) , snake_case )
__UpperCamelCase = actual_strassen(matrix_addition(snake_case , snake_case ) , snake_case )
__UpperCamelCase = actual_strassen(snake_case , matrix_subtraction(snake_case , snake_case ) )
__UpperCamelCase = actual_strassen(matrix_addition(snake_case , snake_case ) , matrix_addition(snake_case , snake_case ) )
__UpperCamelCase = actual_strassen(matrix_subtraction(snake_case , snake_case ) , matrix_addition(snake_case , snake_case ) )
__UpperCamelCase = actual_strassen(matrix_subtraction(snake_case , snake_case ) , matrix_addition(snake_case , snake_case ) )
__UpperCamelCase = matrix_addition(matrix_subtraction(matrix_addition(snake_case , snake_case ) , snake_case ) , snake_case )
__UpperCamelCase = matrix_addition(snake_case , snake_case )
__UpperCamelCase = matrix_addition(snake_case , snake_case )
__UpperCamelCase = matrix_subtraction(matrix_subtraction(matrix_addition(snake_case , snake_case ) , snake_case ) , snake_case )
# construct the new matrix from our 4 quadrants
__UpperCamelCase = []
for i in range(len(snake_case ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(snake_case ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def A ( snake_case :list , snake_case :list ) -> list:
if matrix_dimensions(snake_case )[1] != matrix_dimensions(snake_case )[0]:
__UpperCamelCase = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'Matrix A: {matrixa}\n'
f'Matrix B: {matrixa}'
)
raise Exception(snake_case )
__UpperCamelCase = matrix_dimensions(snake_case )
__UpperCamelCase = matrix_dimensions(snake_case )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__UpperCamelCase = max(*snake_case , *snake_case )
__UpperCamelCase = int(math.pow(2 , math.ceil(math.loga(snake_case ) ) ) )
__UpperCamelCase = matrixa
__UpperCamelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , snake_case ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , snake_case ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , snake_case ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__UpperCamelCase = actual_strassen(snake_case , snake_case )
# Removing the additional zeros
for i in range(0 , snake_case ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , snake_case ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase : List[str] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 316
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Any = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "van"
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[3, 3, 12, 3] , __UpperCAmelCase=[8, 8, 4, 4] , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=1E-2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = patch_sizes
__UpperCamelCase = strides
__UpperCamelCase = hidden_sizes
__UpperCamelCase = depths
__UpperCamelCase = mlp_ratios
__UpperCamelCase = hidden_act
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = layer_scale_init_value
__UpperCamelCase = drop_path_rate
__UpperCamelCase = dropout_rate
| 316
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
def A ( snake_case :int ) -> list[int]:
__UpperCamelCase = 2
__UpperCamelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(snake_case )
if n > 1:
factors.append(snake_case )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
def A ( ) -> Optional[int]:
__UpperCamelCase = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=snake_case , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=snake_case , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=snake_case , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=snake_case , default='data/dump' , help='The dump file prefix.' )
__UpperCamelCase = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__UpperCamelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
__UpperCamelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
__UpperCamelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
__UpperCamelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__UpperCamelCase = tokenizer.special_tokens_map['cls_token'] # `<s>`
__UpperCamelCase = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
__UpperCamelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__UpperCamelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
__UpperCamelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
__UpperCamelCase = fp.readlines()
logger.info('Start encoding' )
logger.info(f'{len(snake_case )} examples to process.' )
__UpperCamelCase = []
__UpperCamelCase = 0
__UpperCamelCase = 1_0_0_0_0
__UpperCamelCase = time.time()
for text in data:
__UpperCamelCase = f'{bos} {text.strip()} {sep}'
__UpperCamelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
rslt.append(snake_case )
iter += 1
if iter % interval == 0:
__UpperCamelCase = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__UpperCamelCase = time.time()
logger.info('Finished binarization' )
logger.info(f'{len(snake_case )} examples processed.' )
__UpperCamelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle'
__UpperCamelCase = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
__UpperCamelCase = [np.uintaa(snake_case ) for d in rslt]
else:
__UpperCamelCase = [np.intaa(snake_case ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(snake_case , 'wb' ) as handle:
pickle.dump(rslt_ , snake_case , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Dict = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A ( snake_case :Any ) -> Any: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A ( ) -> Optional[Any]:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
__UpperCamelCase = [1, 2, 3]
with pytest.raises(snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(snake_case , snake_case , num_proc=2 )
with pytest.raises(snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(snake_case , snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def A ( snake_case :Union[str, Any] ) -> Optional[Any]:
__UpperCamelCase = [1, 2]
__UpperCamelCase = {'a': 1, 'b': 2}
__UpperCamelCase = {'a': [1, 2], 'b': [3, 4]}
__UpperCamelCase = {'a': {'1': 1}, 'b': 2}
__UpperCamelCase = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
__UpperCamelCase = [2, 3]
__UpperCamelCase = {'a': 2, 'b': 3}
__UpperCamelCase = {'a': [2, 3], 'b': [4, 5]}
__UpperCamelCase = {'a': {'1': 2}, 'b': 3}
__UpperCamelCase = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(snake_case , snake_case , num_proc=snake_case ) == expected_map_nested_sa
assert map_nested(snake_case , snake_case , num_proc=snake_case ) == expected_map_nested_sa
assert map_nested(snake_case , snake_case , num_proc=snake_case ) == expected_map_nested_sa
assert map_nested(snake_case , snake_case , num_proc=snake_case ) == expected_map_nested_sa
assert map_nested(snake_case , snake_case , num_proc=snake_case ) == expected_map_nested_sa
| 316
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def A ( snake_case :Features ) -> Optional[int]:
__UpperCamelCase = np.inf
def set_batch_size(snake_case :FeatureType ) -> None:
nonlocal batch_size
if isinstance(snake_case , snake_case ):
__UpperCamelCase = min(snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(snake_case , snake_case ):
__UpperCamelCase = min(snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(snake_case , snake_case ) and feature.dtype == "binary":
__UpperCamelCase = min(snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(snake_case , snake_case )
return None if batch_size is np.inf else batch_size
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths}
__UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
__UpperCamelCase = Parquet(
cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , hash=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = dataset
__UpperCamelCase = path_or_buf
__UpperCamelCase = batch_size or get_writer_batch_size(dataset.features )
__UpperCamelCase = parquet_writer_kwargs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__UpperCamelCase = self._write(file_obj=__UpperCAmelCase , batch_size=__UpperCAmelCase , **self.parquet_writer_kwargs )
else:
__UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __UpperCAmelCase )
__UpperCamelCase = self.dataset.features.arrow_schema
__UpperCamelCase = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase , **__UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __UpperCAmelCase ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__UpperCamelCase = query_table(
table=self.dataset._data , key=slice(__UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 316
|
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int = 1_0_0_0_0_0_0 ) -> int:
__UpperCamelCase = 1
__UpperCamelCase = 1
__UpperCamelCase = {1: 1}
for inputa in range(2 , snake_case ):
__UpperCamelCase = 0
__UpperCamelCase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__UpperCamelCase = (3 * number) + 1
counter += 1
if inputa not in counters:
__UpperCamelCase = counter
if counter > pre_counter:
__UpperCamelCase = inputa
__UpperCamelCase = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 316
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def A ( snake_case :Tuple ) -> List[str]:
__UpperCamelCase = SwinConfig()
__UpperCamelCase = swin_name.split('_' )
__UpperCamelCase = name_split[1]
__UpperCamelCase = int(name_split[4] )
__UpperCamelCase = int(name_split[3][-1] )
if model_size == "tiny":
__UpperCamelCase = 9_6
__UpperCamelCase = (2, 2, 6, 2)
__UpperCamelCase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__UpperCamelCase = 9_6
__UpperCamelCase = (2, 2, 1_8, 2)
__UpperCamelCase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__UpperCamelCase = 1_2_8
__UpperCamelCase = (2, 2, 1_8, 2)
__UpperCamelCase = (4, 8, 1_6, 3_2)
else:
__UpperCamelCase = 1_9_2
__UpperCamelCase = (2, 2, 1_8, 2)
__UpperCamelCase = (6, 1_2, 2_4, 4_8)
if "in22k" in swin_name:
__UpperCamelCase = 2_1_8_4_1
else:
__UpperCamelCase = 1_0_0_0
__UpperCamelCase = 'huggingface/label-files'
__UpperCamelCase = 'imagenet-1k-id2label.json'
__UpperCamelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase = {int(snake_case ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
__UpperCamelCase = img_size
__UpperCamelCase = num_classes
__UpperCamelCase = embed_dim
__UpperCamelCase = depths
__UpperCamelCase = num_heads
__UpperCamelCase = window_size
return config
def A ( snake_case :Dict ) -> Tuple:
if "patch_embed.proj" in name:
__UpperCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__UpperCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__UpperCamelCase = 'encoder.' + name
if "attn.proj" in name:
__UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__UpperCamelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__UpperCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__UpperCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
__UpperCamelCase = 'layernorm.weight'
if name == "norm.bias":
__UpperCamelCase = 'layernorm.bias'
if "head" in name:
__UpperCamelCase = name.replace('head' , 'classifier' )
else:
__UpperCamelCase = 'swin.' + name
return name
def A ( snake_case :str , snake_case :Any ) -> Any:
for key in orig_state_dict.copy().keys():
__UpperCamelCase = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
__UpperCamelCase = key.split('.' )
__UpperCamelCase = int(key_split[1] )
__UpperCamelCase = int(key_split[3] )
__UpperCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__UpperCamelCase = val[:dim, :]
__UpperCamelCase = val[
dim : dim * 2, :
]
__UpperCamelCase = val[-dim:, :]
else:
__UpperCamelCase = val[
:dim
]
__UpperCamelCase = val[
dim : dim * 2
]
__UpperCamelCase = val[
-dim:
]
else:
__UpperCamelCase = val
return orig_state_dict
def A ( snake_case :Optional[Any] , snake_case :Optional[int] ) -> Optional[int]:
__UpperCamelCase = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
__UpperCamelCase = get_swin_config(snake_case )
__UpperCamelCase = SwinForImageClassification(snake_case )
model.eval()
__UpperCamelCase = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
__UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
__UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
__UpperCamelCase = image_processor(images=snake_case , return_tensors='pt' )
__UpperCamelCase = timm_model(inputs['pixel_values'] )
__UpperCamelCase = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase : Tuple = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 316
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
__UpperCamelCase = {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ):
'''simple docstring'''
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__UpperCamelCase = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
__UpperCamelCase = []
def UpperCAmelCase ( self ):
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ):
'''simple docstring'''
if s == d:
return []
__UpperCamelCase = []
__UpperCamelCase = []
if s == -2:
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def UpperCAmelCase ( self , __UpperCAmelCase=-1 ):
'''simple docstring'''
if c == -1:
__UpperCamelCase = floor(random() * 1_0000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__UpperCamelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def UpperCAmelCase ( self , __UpperCAmelCase=-2 ):
'''simple docstring'''
__UpperCamelCase = deque()
__UpperCamelCase = []
if s == -2:
__UpperCamelCase = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
__UpperCamelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase ( self , __UpperCAmelCase=-2 ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = []
if s == -2:
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = s
__UpperCamelCase = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = -2
__UpperCamelCase = []
__UpperCamelCase = s
__UpperCamelCase = False
__UpperCamelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCamelCase = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCamelCase = True
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = False
indirect_parents.append(__UpperCAmelCase )
__UpperCamelCase = s
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = -2
__UpperCamelCase = []
__UpperCamelCase = s
__UpperCamelCase = False
__UpperCamelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCamelCase = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCamelCase = True
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = False
indirect_parents.append(__UpperCAmelCase )
__UpperCamelCase = s
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ):
'''simple docstring'''
__UpperCamelCase = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = time()
return end - begin
def UpperCAmelCase ( self , __UpperCAmelCase=-2 ):
'''simple docstring'''
__UpperCamelCase = time()
self.bfs(__UpperCAmelCase )
__UpperCamelCase = time()
return end - begin
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
__UpperCamelCase = {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ):
'''simple docstring'''
if self.graph.get(__UpperCAmelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__UpperCamelCase = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
__UpperCamelCase = [[w, u]]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ):
'''simple docstring'''
if s == d:
return []
__UpperCamelCase = []
__UpperCamelCase = []
if s == -2:
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def UpperCAmelCase ( self , __UpperCAmelCase=-1 ):
'''simple docstring'''
if c == -1:
__UpperCamelCase = floor(random() * 1_0000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__UpperCamelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def UpperCAmelCase ( self , __UpperCAmelCase=-2 ):
'''simple docstring'''
__UpperCamelCase = deque()
__UpperCamelCase = []
if s == -2:
__UpperCamelCase = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
__UpperCamelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = -2
__UpperCamelCase = []
__UpperCamelCase = s
__UpperCamelCase = False
__UpperCamelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCamelCase = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCamelCase = True
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = False
indirect_parents.append(__UpperCAmelCase )
__UpperCamelCase = s
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
__UpperCamelCase = -2
__UpperCamelCase = []
__UpperCamelCase = s
__UpperCamelCase = False
__UpperCamelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCamelCase = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCamelCase = True
if len(__UpperCAmelCase ) != 0:
__UpperCamelCase = stack[len(__UpperCAmelCase ) - 1]
else:
__UpperCamelCase = False
indirect_parents.append(__UpperCAmelCase )
__UpperCamelCase = s
__UpperCamelCase = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ):
'''simple docstring'''
__UpperCamelCase = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = time()
return end - begin
def UpperCAmelCase ( self , __UpperCAmelCase=-2 ):
'''simple docstring'''
__UpperCamelCase = time()
self.bfs(__UpperCAmelCase )
__UpperCamelCase = time()
return end - begin
| 316
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = {}
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if vertex not in self.adjacency:
__UpperCamelCase = {}
self.num_vertices += 1
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
self.add_vertex(__UpperCAmelCase )
self.add_vertex(__UpperCAmelCase )
if head == tail:
return
__UpperCamelCase = weight
__UpperCamelCase = weight
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.get_edges()
for edge in edges:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = edge
edges.remove((tail, head, weight) )
for i in range(len(__UpperCAmelCase ) ):
__UpperCamelCase = list(edges[i] )
edges.sort(key=lambda __UpperCAmelCase : e[2] )
for i in range(len(__UpperCAmelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__UpperCamelCase = edges[i][2] + 1
for edge in edges:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = edge
__UpperCamelCase = weight
__UpperCamelCase = weight
def __str__( self ):
'''simple docstring'''
__UpperCamelCase = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
__UpperCamelCase = self.adjacency[head][tail]
string += F'{head} -> {tail} == {weight}\n'
return string.rstrip('\n' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
__UpperCamelCase = Graph()
if vertices is None:
__UpperCamelCase = []
if edges is None:
__UpperCamelCase = []
for vertex in vertices:
g.add_vertex(__UpperCAmelCase )
for edge in edges:
g.add_edge(*__UpperCAmelCase )
return g
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
__UpperCamelCase = {}
__UpperCamelCase = {}
def __len__( self ):
'''simple docstring'''
return len(self.parent )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if item in self.parent:
return self.find(__UpperCAmelCase )
__UpperCamelCase = item
__UpperCamelCase = 0
return item
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__UpperCAmelCase )
if item != self.parent[item]:
__UpperCamelCase = self.find(self.parent[item] )
return self.parent[item]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.find(__UpperCAmelCase )
__UpperCamelCase = self.find(__UpperCAmelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__UpperCamelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
__UpperCamelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__UpperCamelCase = roota
return roota
return None
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = graph.num_vertices
__UpperCamelCase = Graph.UnionFind()
__UpperCamelCase = []
while num_components > 1:
__UpperCamelCase = {}
for vertex in graph.get_vertices():
__UpperCamelCase = -1
__UpperCamelCase = graph.get_edges()
for edge in edges:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = edge
edges.remove((tail, head, weight) )
for edge in edges:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = edge
__UpperCamelCase = union_find.find(__UpperCAmelCase )
__UpperCamelCase = union_find.find(__UpperCAmelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__UpperCamelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__UpperCamelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = cheap_edge[vertex]
if union_find.find(__UpperCAmelCase ) != union_find.find(__UpperCAmelCase ):
union_find.union(__UpperCAmelCase , __UpperCAmelCase )
mst_edges.append(cheap_edge[vertex] )
__UpperCamelCase = num_components - 1
__UpperCamelCase = Graph.build(edges=__UpperCAmelCase )
return mst
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
| 1
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCamelCase : Any = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
UpperCamelCase : Tuple = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
UpperCamelCase : Any = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
UpperCamelCase : Dict = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
UpperCamelCase : List[str] = "allenai"
def A ( snake_case :Tuple ) -> Any:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__UpperCamelCase = dict((re.sub(r'@@$' , '' , snake_case ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , snake_case ), v) for k, v in d.items() )
__UpperCamelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
__UpperCamelCase = d[k] # restore
return da
def A ( snake_case :Optional[int] , snake_case :str ) -> List[str]:
# prep
assert os.path.exists(snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__UpperCamelCase = basename(snake_case )
__UpperCamelCase = dirname(snake_case )
__UpperCamelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
__UpperCamelCase = cls.hub_models()
__UpperCamelCase = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
__UpperCamelCase = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'using checkpoint {checkpoint_file}' )
__UpperCamelCase = hub_utils.from_pretrained(
snake_case , snake_case , snake_case , archive_map=snake_case , **snake_case )
__UpperCamelCase = vars(chkpt['args']['model'] )
__UpperCamelCase = args['source_lang']
__UpperCamelCase = args['target_lang']
__UpperCamelCase = dirname(snake_case )
__UpperCamelCase = basename(snake_case )
# dicts
__UpperCamelCase = os.path.join(snake_case , f'dict.{src_lang}.txt' )
__UpperCamelCase = os.path.join(snake_case , f'dict.{tgt_lang}.txt' )
__UpperCamelCase = Dictionary.load(snake_case )
__UpperCamelCase = rewrite_dict_keys(src_dict.indices )
__UpperCamelCase = len(snake_case )
__UpperCamelCase = os.path.join(snake_case , 'vocab-src.json' )
print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
__UpperCamelCase = True
for k in src_vocab.keys():
if not k.islower():
__UpperCamelCase = False
break
__UpperCamelCase = Dictionary.load(snake_case )
__UpperCamelCase = rewrite_dict_keys(tgt_dict.indices )
__UpperCamelCase = len(snake_case )
__UpperCamelCase = os.path.join(snake_case , 'vocab-tgt.json' )
print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# merges_file (bpecodes)
__UpperCamelCase = os.path.join(snake_case , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
__UpperCamelCase = os.path.join(snake_case , snake_case )
if os.path.exists(snake_case ):
break
with open(snake_case , encoding='utf-8' ) as fin:
__UpperCamelCase = fin.read()
__UpperCamelCase = re.sub(r' \d+$' , '' , snake_case , 0 , re.M ) # remove frequency number
print(f'Generating {merges_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as fout:
fout.write(snake_case )
# model config
__UpperCamelCase = os.path.join(snake_case , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", f'need to extend tokenizer to support bpe={args["tokenizer"]}'
__UpperCamelCase = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
__UpperCamelCase = 5
__UpperCamelCase = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
__UpperCamelCase = best_score_hparams[model_dir]['length_penalty']
else:
__UpperCamelCase = 1.0
print(f'Generating {fsmt_model_config_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# tokenizer config
__UpperCamelCase = os.path.join(snake_case , snake_case )
__UpperCamelCase = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1_0_2_4,
'do_lower_case': do_lower_case,
}
print(f'Generating {fsmt_tokenizer_config_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# model
__UpperCamelCase = chkpt['models'][0]
__UpperCamelCase = model.state_dict()
# rename keys to start with 'model.'
__UpperCamelCase = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
__UpperCamelCase = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(snake_case , snake_case )
__UpperCamelCase = FSMTConfig.from_pretrained(snake_case )
__UpperCamelCase = FSMTForConditionalGeneration(snake_case )
# check that it loads ok
model_new.load_state_dict(snake_case , strict=snake_case )
# save
__UpperCamelCase = os.path.join(snake_case , snake_case )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(snake_case , snake_case )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'cd {data_root}' )
print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase : str = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 316
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : str = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "markuplm"
def __init__( self , __UpperCAmelCase=3_0522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=256 , __UpperCAmelCase=1024 , __UpperCAmelCase=216 , __UpperCAmelCase=1001 , __UpperCAmelCase=32 , __UpperCAmelCase=50 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
# additional properties
__UpperCamelCase = max_depth
__UpperCamelCase = max_xpath_tag_unit_embeddings
__UpperCamelCase = max_xpath_subs_unit_embeddings
__UpperCamelCase = tag_pad_id
__UpperCamelCase = subs_pad_id
__UpperCamelCase = xpath_unit_hidden_size
| 316
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Tuple = {"vocab_file": "spiece.model"}
UpperCamelCase : Tuple = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
UpperCamelCase : str = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
UpperCamelCase : List[str] = "▁"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__UpperCamelCase = do_lower_case
__UpperCamelCase = remove_space
__UpperCamelCase = keep_accents
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
__UpperCamelCase = ' '.join(inputs.strip().split() )
else:
__UpperCamelCase = inputs
__UpperCamelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
__UpperCamelCase = unicodedata.normalize('NFKD' , __UpperCAmelCase )
__UpperCamelCase = ''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
__UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.preprocess_text(__UpperCAmelCase )
__UpperCamelCase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
__UpperCamelCase = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase = cur_pieces[1:]
else:
__UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = ''
__UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__UpperCamelCase = True
__UpperCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__UpperCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 316
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :int ) -> bool:
__UpperCamelCase = len(snake_case )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
from math import isqrt, loga
def A ( snake_case :int ) -> list[int]:
__UpperCamelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case , snake_case ):
__UpperCamelCase = False
return [i for i in range(2 , snake_case ) if is_prime[i]]
def A ( snake_case :int = 8_0_0_8_0_0 , snake_case :int = 8_0_0_8_0_0 ) -> int:
__UpperCamelCase = degree * loga(snake_case )
__UpperCamelCase = int(snake_case )
__UpperCamelCase = calculate_prime_numbers(snake_case )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = len(snake_case ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCamelCase : int = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCamelCase : Optional[Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCamelCase : str = sorted(arg_to_scheduler.keys())
UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def A ( snake_case :Any , snake_case :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=snake_case , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
__UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(snake_case )
if logging_callback is None:
__UpperCamelCase = LoggingCallback()
__UpperCamelCase = {}
if args.fpaa:
__UpperCamelCase = 1_6
if args.gpus > 1:
__UpperCamelCase = 'auto'
__UpperCamelCase = 'ddp'
__UpperCamelCase = args.accumulate_grad_batches
__UpperCamelCase = None
__UpperCamelCase = 'auto'
__UpperCamelCase = pl.Trainer.from_argparse_args(
snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , )
if args.do_train:
trainer.fit(snake_case )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
| 316
| 1
|
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
UpperCamelCase : List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase : List[Any] = get_tests_dir("fixtures/vocab.json")
UpperCamelCase : Any = get_tests_dir("fixtures")
class __lowerCAmelCase ( unittest.TestCase ):
lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig()
__UpperCamelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
copyfile(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'vocab.json' ) )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
__UpperCamelCase = WavaVecaProcessor(__UpperCAmelCase , __UpperCAmelCase )
# save in new folder
processor.save_pretrained(__UpperCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) as f:
__UpperCamelCase = json.load(__UpperCAmelCase )
config_dict.pop('processor_class' )
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' ) as f:
f.write(json.dumps(__UpperCAmelCase ) )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
__UpperCamelCase = WavaVecaProcessor(__UpperCAmelCase , __UpperCAmelCase )
# save in new folder
processor.save_pretrained(__UpperCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) as f:
__UpperCamelCase = json.load(__UpperCAmelCase )
config_dict.pop('processor_class' )
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' ) as f:
f.write(json.dumps(__UpperCAmelCase ) )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(__UpperCAmelCase )
# copy relevant files
copyfile(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' ) as f:
f.write('{}' )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
__UpperCamelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
__UpperCamelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
__UpperCamelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def UpperCAmelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register('custom' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoProcessor.register(__UpperCAmelCase , __UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoProcessor.register(__UpperCAmelCase , __UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(__UpperCAmelCase , 'vocab.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(__UpperCAmelCase )
__UpperCamelCase = CustomProcessor(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
'''simple docstring'''
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "AutoFeatureExtractor"
lowercase = "AutoTokenizer"
lowercase = False
try:
AutoConfig.register('custom' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoProcessor.register(__UpperCAmelCase , __UpperCAmelCase )
# If remote code is not set, the default is to use local classes.
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = WavaVecaProcessor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__UpperCAmelCase , 'test-processor' ) , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__UpperCamelCase = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(new_processor.feature_extractor , __UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = WavaVecaProcessor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__UpperCAmelCase , 'test-processor-org' ) , push_to_hub=__UpperCAmelCase , use_auth_token=self._token , organization='valid_org' , )
__UpperCamelCase = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(new_processor.feature_extractor , __UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(__UpperCAmelCase , 'vocab.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(__UpperCAmelCase )
__UpperCamelCase = CustomProcessor(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
__UpperCamelCase = Repository(__UpperCAmelCase , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(__UpperCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) ) as f:
__UpperCamelCase = json.load(__UpperCAmelCase )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , 'custom_processing.py' ) ) )
repo.push_to_hub()
__UpperCamelCase = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
| 316
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 316
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def A ( ) -> Optional[int]:
__UpperCamelCase = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
__UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('RGB' )
return image
def A ( snake_case :List[Any] ) -> Dict:
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def A ( snake_case :Optional[Any] , snake_case :str , snake_case :Tuple ) -> Any:
__UpperCamelCase = dct.pop(snake_case )
__UpperCamelCase = val
def A ( snake_case :List[str] , snake_case :int ) -> Optional[Any]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
__UpperCamelCase = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(snake_case , requires_grad=snake_case ), v_bias) )
__UpperCamelCase = qkv_bias
def A ( snake_case :Optional[int] ) -> Tuple:
__UpperCamelCase = 3_6_4 if 'coco' in model_name else 2_2_4
__UpperCamelCase = InstructBlipVisionConfig(image_size=snake_case ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__UpperCamelCase = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
__UpperCamelCase = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__UpperCamelCase = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
__UpperCamelCase = InstructBlipConfig(vision_config=snake_case , text_config=snake_case , qformer_config=snake_case )
return config, image_size
@torch.no_grad()
def A ( snake_case :Any , snake_case :str=None , snake_case :Optional[int]=False ) -> List[str]:
__UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
__UpperCamelCase = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__UpperCamelCase = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
__UpperCamelCase , __UpperCamelCase = get_blipa_config(snake_case )
__UpperCamelCase = InstructBlipForConditionalGeneration(snake_case ).eval()
__UpperCamelCase = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
__UpperCamelCase , __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda:1' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase = 'cuda:2' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_model_and_preprocess(
name=snake_case , model_type=snake_case , is_eval=snake_case , device=snake_case )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(snake_case )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "llm_proj" in key:
__UpperCamelCase = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
__UpperCamelCase = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(snake_case , snake_case )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(snake_case , strict=snake_case )
__UpperCamelCase = load_demo_image()
__UpperCamelCase = 'What is unusual about this image?'
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=snake_case , image_std=snake_case )
__UpperCamelCase = InstructBlipProcessor(
image_processor=snake_case , tokenizer=snake_case , qformer_tokenizer=snake_case , )
__UpperCamelCase = processor(images=snake_case , text=snake_case , return_tensors='pt' ).to(snake_case )
# make sure processor creates exact same pixel values
__UpperCamelCase = vis_processors['eval'](snake_case ).unsqueeze(0 ).to(snake_case )
__UpperCamelCase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , snake_case )
original_model.to(snake_case )
hf_model.to(snake_case )
with torch.no_grad():
if "vicuna" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
__UpperCamelCase = hf_model(**snake_case ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
__UpperCamelCase = tokenizer('\n' , return_tensors='pt' ).input_ids.to(snake_case )
__UpperCamelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
__UpperCamelCase = hf_model(**snake_case , labels=snake_case ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__UpperCamelCase = 1e-4 if 'vicuna' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , snake_case , atol=snake_case )
print('Looks ok!' )
print('Generating with original model...' )
__UpperCamelCase = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
__UpperCamelCase = hf_model.generate(
**snake_case , do_sample=snake_case , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__UpperCamelCase = 2
print('Original generation:' , snake_case )
__UpperCamelCase = processor.batch_decode(snake_case , skip_special_tokens=snake_case )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , snake_case )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case )
hf_model.save_pretrained(snake_case )
if push_to_hub:
processor.push_to_hub(f'Salesforce/{model_name}' )
hf_model.push_to_hub(f'Salesforce/{model_name}' )
if __name__ == "__main__":
UpperCamelCase : List[str] = argparse.ArgumentParser()
UpperCamelCase : List[Any] = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
UpperCamelCase : Dict = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 316
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str:
output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , )
else:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , )
@torch.no_grad()
def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]:
__UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCamelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__UpperCamelCase = 'cpu'
__UpperCamelCase = Path(snake_case )
# VAE DECODER
__UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
__UpperCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCamelCase = vae_decoder.decode
onnx_export(
snake_case , model_args=(
torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=snake_case , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316
| 1
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCamelCase : Union[str, Any] = namedtuple("covid_data", "cases deaths recovered")
def A ( snake_case :str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
__UpperCamelCase = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(snake_case ).content ).xpath(snake_case ) )
UpperCamelCase : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 316
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
def A ( snake_case :list[int] ) -> list[str]:
__UpperCamelCase = []
for key in product(snake_case , repeat=3 ):
__UpperCamelCase = try_key(snake_case , snake_case )
if encoded is not None:
possibles.append(snake_case )
return possibles
def A ( snake_case :list[str] , snake_case :str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A ( snake_case :str = "p059_cipher.txt" ) -> int:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
__UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
__UpperCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
__UpperCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
__UpperCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "longformer"
def __init__( self , __UpperCAmelCase = 512 , __UpperCAmelCase = 2 , __UpperCAmelCase = 1 , __UpperCAmelCase = 0 , __UpperCAmelCase = 2 , __UpperCAmelCase = 3_0522 , __UpperCAmelCase = 768 , __UpperCAmelCase = 12 , __UpperCAmelCase = 12 , __UpperCAmelCase = 3072 , __UpperCAmelCase = "gelu" , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 512 , __UpperCAmelCase = 2 , __UpperCAmelCase = 0.0_2 , __UpperCAmelCase = 1E-12 , __UpperCAmelCase = False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = attention_window
__UpperCamelCase = sep_token_id
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = onnx_export
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = True
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().outputs
if self.task == "default":
__UpperCamelCase = {0: 'batch'}
return outputs
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return 1E-4
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
'''simple docstring'''
__UpperCamelCase = super().generate_dummy_inputs(
preprocessor=__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
__UpperCamelCase = torch.zeros_like(inputs['input_ids'] )
# make every second token global
__UpperCamelCase = 1
return inputs
| 316
|
"""simple docstring"""
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( snake_case :float , snake_case :str , snake_case :str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(snake_case )}'
)
raise ValueError(snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :Tuple ) -> Dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def A ( snake_case :dict[int, list[int]] ) -> list[tuple[int, int]]:
__UpperCamelCase = 0
__UpperCamelCase = len(snake_case ) # No of vertices in graph
__UpperCamelCase = [0] * n
__UpperCamelCase = [False] * n
def dfs(snake_case :Tuple , snake_case :int , snake_case :Any , snake_case :Optional[int] ):
__UpperCamelCase = True
__UpperCamelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(snake_case , snake_case , snake_case , id_ )
__UpperCamelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
__UpperCamelCase = min(low[at] , low[to] )
__UpperCamelCase = []
for i in range(snake_case ):
if not visited[i]:
dfs(snake_case , -1 , snake_case , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = IFInpaintingSuperResolutionPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 316
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = [[1, 2, 4], [1, 2, 3, 4]]
__UpperCamelCase = DisjunctiveConstraint(__UpperCAmelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCAmelCase ) )
with self.assertRaises(__UpperCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCAmelCase ):
DisjunctiveConstraint(__UpperCAmelCase ) # fails here
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = [[1, 2, 3], [1, 2, 4]]
__UpperCamelCase = DisjunctiveConstraint(__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(1 )
__UpperCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(2 )
__UpperCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(3 )
__UpperCamelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__UpperCamelCase = DisjunctiveConstraint(__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 316
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 316
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def A ( snake_case :int , snake_case :str=False ) -> Union[str, Any]:
__UpperCamelCase = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def A ( snake_case :List[str] , snake_case :Optional[Any] , snake_case :Any=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
__UpperCamelCase = ''
else:
__UpperCamelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
__UpperCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase = in_proj_weight[
: config.hidden_size, :
]
__UpperCamelCase = in_proj_bias[: config.hidden_size]
__UpperCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__UpperCamelCase = in_proj_bias[-config.hidden_size :]
def A ( snake_case :List[Any] ) -> int:
__UpperCamelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def A ( snake_case :Optional[int] , snake_case :Any , snake_case :str ) -> List[Any]:
__UpperCamelCase = dct.pop(snake_case )
__UpperCamelCase = val
def A ( ) -> Union[str, Any]:
__UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def A ( snake_case :Optional[int] , snake_case :Any , snake_case :Tuple=False ) -> List[str]:
__UpperCamelCase = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=snake_case , )
__UpperCamelCase = ViTHybridConfig(backbone_config=snake_case , image_size=3_8_4 , num_labels=1_0_0_0 )
__UpperCamelCase = False
# load original model from timm
__UpperCamelCase = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__UpperCamelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case )
__UpperCamelCase = create_rename_keys(snake_case , snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_q_k_v(snake_case , snake_case , snake_case )
__UpperCamelCase = 'huggingface/label-files'
__UpperCamelCase = 'imagenet-1k-id2label.json'
__UpperCamelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase = {int(snake_case ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
__UpperCamelCase = ViTHybridModel(snake_case ).eval()
else:
__UpperCamelCase = ViTHybridForImageClassification(snake_case ).eval()
model.load_state_dict(snake_case )
# create image processor
__UpperCamelCase = create_transform(**resolve_data_config({} , model=snake_case ) )
__UpperCamelCase = transform.transforms
__UpperCamelCase = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
__UpperCamelCase = ViTHybridImageProcessor(
do_resize=snake_case , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__UpperCamelCase = prepare_img()
__UpperCamelCase = transform(snake_case ).unsqueeze(0 )
__UpperCamelCase = processor(snake_case , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(snake_case , snake_case )
# verify logits
with torch.no_grad():
__UpperCamelCase = model(snake_case )
__UpperCamelCase = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
__UpperCamelCase = timm_model.forward_features(snake_case )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case , outputs.pooler_output , atol=1e-3 )
else:
__UpperCamelCase = timm_model(snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case )
if push_to_hub:
print(f'Pushing model and processor to the hub {vit_name}' )
model.push_to_hub(f'ybelkada/{vit_name}' )
processor.push_to_hub(f'ybelkada/{vit_name}' )
if __name__ == "__main__":
UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
UpperCamelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 316
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
UpperCamelCase : str = 3
def A ( snake_case :int ) -> int:
print('Generating primitive root of p' )
while True:
__UpperCamelCase = random.randrange(3 , snake_case )
if pow(snake_case , 2 , snake_case ) == 1:
continue
if pow(snake_case , snake_case , snake_case ) == 1:
continue
return g
def A ( snake_case :int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...' )
__UpperCamelCase = rabin_miller.generate_large_prime(snake_case ) # select large prime number.
__UpperCamelCase = primitive_root(snake_case ) # one primitive root on modulo p.
__UpperCamelCase = random.randrange(3 , snake_case ) # private_key -> have to be greater than 2 for safety.
__UpperCamelCase = cryptomath.find_mod_inverse(pow(snake_case , snake_case , snake_case ) , snake_case )
__UpperCamelCase = (key_size, e_a, e_a, p)
__UpperCamelCase = (key_size, d)
return public_key, private_key
def A ( snake_case :str , snake_case :int ) -> None:
if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ):
print('\nWARNING:' )
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__UpperCamelCase , __UpperCamelCase = generate_key(snake_case )
print(f'\nWriting public key to file {name}_pubkey.txt...' )
with open(f'{name}_pubkey.txt' , 'w' ) as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
print(f'Writing private key to file {name}_privkey.txt...' )
with open(f'{name}_privkey.txt' , 'w' ) as fo:
fo.write(f'{private_key[0]},{private_key[1]}' )
def A ( ) -> None:
print('Making key files...' )
make_key_files('elgamal' , 2_0_4_8 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 316
| 1
|
"""simple docstring"""
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( snake_case :float , snake_case :str , snake_case :str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(snake_case )}'
)
raise ValueError(snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import functools
def A ( snake_case :list[int] , snake_case :list[int] ) -> int:
# Validation
if not isinstance(snake_case , snake_case ) or not all(isinstance(snake_case , snake_case ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(snake_case ) != 3 or not all(isinstance(snake_case , snake_case ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(snake_case ) == 0:
return 0
if min(snake_case ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(snake_case ) >= 3_6_6:
raise ValueError('All days elements should be less than 366' )
__UpperCamelCase = set(snake_case )
@functools.cache
def dynamic_programming(snake_case :int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import bisect
def A ( snake_case :list[int] , snake_case :int , snake_case :int = 0 , snake_case :int = -1 ) -> int:
if hi < 0:
__UpperCamelCase = len(snake_case )
while lo < hi:
__UpperCamelCase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__UpperCamelCase = mid + 1
else:
__UpperCamelCase = mid
return lo
def A ( snake_case :list[int] , snake_case :int , snake_case :int = 0 , snake_case :int = -1 ) -> int:
if hi < 0:
__UpperCamelCase = len(snake_case )
while lo < hi:
__UpperCamelCase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__UpperCamelCase = mid + 1
else:
__UpperCamelCase = mid
return lo
def A ( snake_case :list[int] , snake_case :int , snake_case :int = 0 , snake_case :int = -1 ) -> None:
sorted_collection.insert(bisect_left(snake_case , snake_case , snake_case , snake_case ) , snake_case )
def A ( snake_case :list[int] , snake_case :int , snake_case :int = 0 , snake_case :int = -1 ) -> None:
sorted_collection.insert(bisect_right(snake_case , snake_case , snake_case , snake_case ) , snake_case )
def A ( snake_case :list[int] , snake_case :int ) -> int | None:
__UpperCamelCase = 0
__UpperCamelCase = len(snake_case ) - 1
while left <= right:
__UpperCamelCase = left + (right - left) // 2
__UpperCamelCase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__UpperCamelCase = midpoint - 1
else:
__UpperCamelCase = midpoint + 1
return None
def A ( snake_case :list[int] , snake_case :int ) -> int | None:
__UpperCamelCase = bisect.bisect_left(snake_case , snake_case )
if index != len(snake_case ) and sorted_collection[index] == item:
return index
return None
def A ( snake_case :list[int] , snake_case :int , snake_case :int , snake_case :int ) -> int | None:
if right < left:
return None
__UpperCamelCase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(snake_case , snake_case , snake_case , midpoint - 1 )
else:
return binary_search_by_recursion(snake_case , snake_case , midpoint + 1 , snake_case )
if __name__ == "__main__":
UpperCamelCase : str = input("Enter numbers separated by comma:\n").strip()
UpperCamelCase : str = sorted(int(item) for item in user_input.split(","))
UpperCamelCase : int = int(input("Enter a single number to be found in the list:\n"))
UpperCamelCase : Tuple = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
| 316
|
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 316
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=4 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_attention_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_choices
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_attention_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = True
__UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = True
lowercase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__UpperCAmelCase )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__UpperCAmelCase )
__UpperCamelCase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__UpperCAmelCase )
__UpperCamelCase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__UpperCamelCase = model(__UpperCAmelCase )[0]
# compare the actual values for a slice.
__UpperCamelCase = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 316
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316
| 1
|
"""simple docstring"""
UpperCamelCase : List[Any] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def A ( snake_case :dict , snake_case :Optional[int] , snake_case :List[Any] ) -> list[str]:
__UpperCamelCase = set()
# keep track of all the paths to be checked
__UpperCamelCase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__UpperCamelCase = queue.pop(0 )
# get the last node from the path
__UpperCamelCase = path[-1]
if node not in explored:
__UpperCamelCase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__UpperCamelCase = list(snake_case )
new_path.append(snake_case )
queue.append(snake_case )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(snake_case )
# in case there's no path between the 2 nodes
return []
def A ( snake_case :dict , snake_case :Any , snake_case :Tuple ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__UpperCamelCase = [start]
__UpperCamelCase = set(snake_case )
# Keep tab on distances from `start` node.
__UpperCamelCase = {start: 0, target: -1}
while queue:
__UpperCamelCase = queue.pop(0 )
if node == target:
__UpperCamelCase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(snake_case )
queue.append(snake_case )
__UpperCamelCase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 316
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
UpperCamelCase : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def A ( snake_case :int ) -> int:
__UpperCamelCase = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
UpperCamelCase : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
UpperCamelCase : int = True
UpperCamelCase : List[Any] = False
def A ( snake_case :int ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__UpperCamelCase = chain(next_number(snake_case ) )
__UpperCamelCase = number_chain
while number < 1_0_0_0_0_0_0_0:
__UpperCamelCase = number_chain
number *= 1_0
return number_chain
def A ( snake_case :int = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
| 316
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase : str = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = size if size is not None else {'shortest_edge': 384}
__UpperCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
__UpperCamelCase = do_resize
__UpperCamelCase = size
# Default value set here for backwards compatibility where the value in config is None
__UpperCamelCase = crop_pct if crop_pct is not None else 224 / 256
__UpperCamelCase = resample
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__UpperCamelCase = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__UpperCamelCase = int(shortest_edge / crop_pct )
__UpperCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
__UpperCamelCase = resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , crop_pct=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
UpperCamelCase : List[str] = "Input must be a string of 8 numbers plus letter"
UpperCamelCase : Optional[int] = "TRWAGMYFPDXBNJZSQVHLCKE"
def A ( snake_case :str ) -> bool:
if not isinstance(snake_case , snake_case ):
__UpperCamelCase = f'Expected string as input, found {type(snake_case ).__name__}'
raise TypeError(snake_case )
__UpperCamelCase = spanish_id.replace('-' , '' ).upper()
if len(snake_case ) != 9:
raise ValueError(snake_case )
try:
__UpperCamelCase = int(spanish_id_clean[0:8] )
__UpperCamelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case ) from ex
if letter.isdigit():
raise ValueError(snake_case )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = IFInpaintingSuperResolutionPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
UpperCamelCase : str = [8, 5, 9, 7]
UpperCamelCase : List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCamelCase : Union[str, Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = claim_vector
__UpperCamelCase = allocated_resources_table
__UpperCamelCase = maximum_claim_table
def UpperCAmelCase ( self ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def UpperCAmelCase ( self ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def UpperCAmelCase ( self ):
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCAmelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def UpperCAmelCase ( self ):
'''simple docstring'''
return {self.__need().index(__UpperCAmelCase ): i for i in self.__need()}
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.__need()
__UpperCamelCase = self.__allocated_resources_table
__UpperCamelCase = self.__available_resources()
__UpperCamelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__UpperCamelCase = False
for each_need in need_list:
__UpperCamelCase = True
for index, need in enumerate(__UpperCAmelCase ):
if need > available_resources[index]:
__UpperCamelCase = False
break
if execution:
__UpperCamelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__UpperCamelCase = original_need_index
print(F'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__UpperCAmelCase )
# update available/freed resources stack
__UpperCamelCase = np.array(__UpperCAmelCase ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__UpperCAmelCase ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def UpperCAmelCase ( self ):
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
F'P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}'
+ ' '.join(F'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
F'P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}'
+ ' '.join(F'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__UpperCAmelCase ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__UpperCAmelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> int:
return int(input_a == input_a == 0 )
def A ( ) -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 316
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import string
from math import logaa
def A ( snake_case :str , snake_case :str ) -> int:
__UpperCamelCase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__UpperCamelCase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def A ( snake_case :str , snake_case :str ) -> tuple[int, int]:
__UpperCamelCase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__UpperCamelCase = corpus_without_punctuation.split('\n' )
__UpperCamelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(snake_case ))
def A ( snake_case :int , snake_case :int , snake_case :List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def A ( snake_case :int , snake_case :int ) -> float:
return round(tf * idf , 3 )
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :int ) -> bool:
__UpperCamelCase = len(snake_case )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
from functools import reduce
UpperCamelCase : Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def A ( snake_case :str = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case , snake_case : str(int(snake_case ) * int(snake_case ) ) , n[i : i + 1_3] ) )
for i in range(len(snake_case ) - 1_2 ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCamelCase : int = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCamelCase : Optional[Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCamelCase : str = sorted(arg_to_scheduler.keys())
UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = Path(self.hparams.output_dir )
__UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
__UpperCamelCase = config
__UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
__UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = tokenizer
__UpperCamelCase = MODEL_MODES[mode]
if model is None:
__UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
__UpperCamelCase = model
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
__UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model
__UpperCamelCase = ['bias', 'LayerNorm.weight']
__UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__UpperCamelCase = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
__UpperCamelCase = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__UpperCamelCase = optimizer
__UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.validation_end(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def A ( snake_case :Any , snake_case :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=snake_case , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
__UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(snake_case )
if logging_callback is None:
__UpperCamelCase = LoggingCallback()
__UpperCamelCase = {}
if args.fpaa:
__UpperCamelCase = 1_6
if args.gpus > 1:
__UpperCamelCase = 'auto'
__UpperCamelCase = 'ddp'
__UpperCamelCase = args.accumulate_grad_batches
__UpperCamelCase = None
__UpperCamelCase = 'auto'
__UpperCamelCase = pl.Trainer.from_argparse_args(
snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , )
if args.do_train:
trainer.fit(snake_case )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
| 316
| 1
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def A ( snake_case :List[Any] , snake_case :List[str] ) -> Union[str, Any]:
# ===== initialization =====
__UpperCamelCase = Mock()
__UpperCamelCase = conn, Mock()
__UpperCamelCase = iter([1, None] )
__UpperCamelCase = lambda snake_case : next(snake_case )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=snake_case )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 316
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :str , snake_case :int ) -> str:
__UpperCamelCase = [[] for _ in range(snake_case )]
__UpperCamelCase = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(snake_case ) <= key:
return input_string
for position, character in enumerate(snake_case ):
__UpperCamelCase = position % (lowest * 2) # puts it in bounds
__UpperCamelCase = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(snake_case )
__UpperCamelCase = [''.join(snake_case ) for row in temp_grid]
__UpperCamelCase = ''.join(snake_case )
return output_string
def A ( snake_case :str , snake_case :int ) -> str:
__UpperCamelCase = []
__UpperCamelCase = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
__UpperCamelCase = [[] for _ in range(snake_case )] # generates template
for position in range(len(snake_case ) ):
__UpperCamelCase = position % (lowest * 2) # puts it in bounds
__UpperCamelCase = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
__UpperCamelCase = 0
for row in temp_grid: # fills in the characters
__UpperCamelCase = input_string[counter : counter + len(snake_case )]
grid.append(list(snake_case ) )
counter += len(snake_case )
__UpperCamelCase = '' # reads as zigzag
for position in range(len(snake_case ) ):
__UpperCamelCase = position % (lowest * 2) # puts it in bounds
__UpperCamelCase = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def A ( snake_case :str ) -> dict[int, str]:
__UpperCamelCase = {}
for key_guess in range(1 , len(snake_case ) ): # tries every key
__UpperCamelCase = decrypt(snake_case , snake_case )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str:
output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , )
else:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , )
@torch.no_grad()
def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]:
__UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCamelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__UpperCamelCase = 'cpu'
__UpperCamelCase = Path(snake_case )
# VAE DECODER
__UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
__UpperCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCamelCase = vae_decoder.decode
onnx_export(
snake_case , model_args=(
torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=snake_case , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Tuple = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
UpperCamelCase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
UpperCamelCase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ["input_ids", "attention_mask"]
lowercase = MBartTokenizer
lowercase = []
lowercase = []
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = vocab_file
__UpperCamelCase = False if not self.vocab_file else True
__UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__UpperCamelCase = {
lang_code: self.convert_tokens_to_ids(__UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__UpperCamelCase = src_lang if src_lang is not None else 'en_XX'
__UpperCamelCase = self.convert_tokens_to_ids(self._src_lang )
__UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase = src_lang
__UpperCamelCase = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = self.convert_tokens_to_ids(__UpperCAmelCase )
__UpperCamelCase = tgt_lang_id
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "en_XX" , __UpperCAmelCase = None , __UpperCAmelCase = "ro_RO" , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.convert_tokens_to_ids(__UpperCAmelCase )
__UpperCamelCase = []
__UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
__UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
__UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
__UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.convert_tokens_to_ids(__UpperCAmelCase )
__UpperCamelCase = []
__UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
__UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
__UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
__UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 316
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
def A ( snake_case :list[int] ) -> list[str]:
__UpperCamelCase = []
for key in product(snake_case , repeat=3 ):
__UpperCamelCase = try_key(snake_case , snake_case )
if encoded is not None:
possibles.append(snake_case )
return possibles
def A ( snake_case :list[str] , snake_case :str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A ( snake_case :str = "p059_cipher.txt" ) -> int:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
__UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
__UpperCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
__UpperCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
__UpperCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__UpperCamelCase = 1
__UpperCamelCase = 1
while repunit:
__UpperCamelCase = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def A ( snake_case :int = 1_0_0_0_0_0_0 ) -> int:
__UpperCamelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(snake_case ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316
|
"""simple docstring"""
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( snake_case :float , snake_case :str , snake_case :str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(snake_case )}'
)
raise ValueError(snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=[30, 30] , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=8 , __UpperCAmelCase=10 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = scope
__UpperCamelCase = n_targets
__UpperCamelCase = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__UpperCamelCase = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__UpperCamelCase = num_patches + 1 + self.num_detection_tokens
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__UpperCamelCase = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__UpperCamelCase = []
for i in range(self.batch_size ):
__UpperCamelCase = {}
__UpperCamelCase = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__UpperCAmelCase )
__UpperCamelCase = torch.rand(self.n_targets , 4 , device=__UpperCAmelCase )
labels.append(__UpperCAmelCase )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = YolosModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = YolosForObjectDetection(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(pixel_values=__UpperCAmelCase )
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowercase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__UpperCamelCase = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__UpperCamelCase = []
for i in range(self.model_tester.batch_size ):
__UpperCamelCase = {}
__UpperCamelCase = torch.ones(
size=(self.model_tester.n_targets,) , device=__UpperCAmelCase , dtype=torch.long )
__UpperCamelCase = torch.ones(
self.model_tester.n_targets , 4 , device=__UpperCAmelCase , dtype=torch.float )
labels.append(__UpperCAmelCase )
__UpperCamelCase = labels
return inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = YolosModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
# in YOLOS, the seq_len is different
__UpperCamelCase = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCamelCase = True
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCamelCase = len(__UpperCAmelCase )
# Check attention is always last and order is fine
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = 1
self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# YOLOS has a different seq_length
__UpperCamelCase = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = YolosModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> Optional[Any]:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(inputs.pixel_values )
# verify outputs
__UpperCamelCase = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__UpperCAmelCase , )
__UpperCamelCase = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify postprocessing
__UpperCamelCase = image_processor.post_process_object_detection(
__UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__UpperCamelCase = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__UpperCAmelCase )
__UpperCamelCase = [75, 75, 17, 63, 17]
__UpperCamelCase = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__UpperCAmelCase )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , __UpperCAmelCase , atol=1E-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , __UpperCAmelCase )
self.assertTrue(torch.allclose(results['boxes'][0, :] , __UpperCAmelCase ) )
| 316
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = IFInpaintingSuperResolutionPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 316
| 1
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int = 1_0_0_0_0_0_0 ) -> int:
__UpperCamelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , snake_case ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 316
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 316
| 1
|
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
__UpperCamelCase = ''
__UpperCamelCase = ''
__UpperCamelCase = []
__UpperCamelCase = 0
__UpperCamelCase = 256
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = cva.imread(__UpperCAmelCase , 0 )
__UpperCamelCase = copy.deepcopy(self.img )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
__UpperCamelCase = np.sum(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
__UpperCamelCase = x[i] / self.k
self.sk += prk
__UpperCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
__UpperCamelCase = int(last % last )
__UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__UpperCAmelCase )
__UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
__UpperCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__UpperCamelCase = self.img[j][i]
if num != self.last_list[num]:
__UpperCamelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCAmelCase ( self ):
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCAmelCase ( self ):
'''simple docstring'''
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase : Any = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
UpperCamelCase : Tuple = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 316
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str:
output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , )
else:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , )
@torch.no_grad()
def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]:
__UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCamelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__UpperCamelCase = 'cpu'
__UpperCamelCase = Path(snake_case )
# VAE DECODER
__UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
__UpperCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCamelCase = vae_decoder.decode
onnx_export(
snake_case , model_args=(
torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=snake_case , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :list[int] ) -> None:
__UpperCamelCase = len(snake_case )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(snake_case , end=',' )
# Consider rest of the activities
for j in range(snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 316
| 1
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> int:
while second != 0:
__UpperCamelCase = first & second
first ^= second
__UpperCamelCase = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : str = int(input("Enter the first number: ").strip())
UpperCamelCase : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
| 316
|
"""simple docstring"""
def A ( snake_case :int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__UpperCamelCase = gray_code_sequence_string(snake_case )
#
# convert them to integers
for i in range(len(snake_case ) ):
__UpperCamelCase = int(sequence[i] , 2 )
return sequence
def A ( snake_case :int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__UpperCamelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__UpperCamelCase = gray_code_sequence_string(bit_count - 1 )
__UpperCamelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__UpperCamelCase = '0' + smaller_sequence[i]
sequence.append(snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__UpperCamelCase = '1' + smaller_sequence[i]
sequence.append(snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCamelCase : str = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_2_8,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 5_0,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 1_0,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 1_0,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__UpperCamelCase = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCAmelCase , repo_id='test-config' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__UpperCamelCase = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__UpperCamelCase = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__UpperCamelCase = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
__UpperCamelCase = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__UpperCamelCase = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__UpperCamelCase = c.n_embd + 1 # int
__UpperCamelCase = c.resid_pdrop + 1.0 # float
__UpperCamelCase = not c.scale_attn_weights # bool
__UpperCamelCase = c.summary_type + 'foo' # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(__UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = PretrainedConfig()
__UpperCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__UpperCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(__UpperCAmelCase , __UpperCAmelCase )]
if len(__UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F' {", ".join(__UpperCAmelCase )}.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__UpperCamelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__UpperCamelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__UpperCAmelCase ) as mock_head:
__UpperCamelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained('bert-base-cased' )
__UpperCamelCase = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(__UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__UpperCamelCase = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__UpperCamelCase = ['config.42.0.0.json']
__UpperCamelCase = 768
configuration.save_pretrained(__UpperCAmelCase )
shutil.move(os.path.join(__UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(__UpperCAmelCase , 'config.42.0.0.json' ) )
__UpperCamelCase = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__UpperCamelCase = 'v4.0.0'
__UpperCamelCase , __UpperCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
__UpperCAmelCase , return_unused_kwargs=__UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__UpperCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__UpperCamelCase = 'v3.0.0'
__UpperCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 768 )
| 316
|
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 316
| 1
|
"""simple docstring"""
class __lowerCAmelCase : # Public class to implement a graph
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = row
__UpperCamelCase = col
__UpperCamelCase = graph
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__UpperCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__UpperCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase )
def UpperCAmelCase ( self ): # And finally, count all islands.
'''simple docstring'''
__UpperCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__UpperCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
count += 1
return count
| 316
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
UpperCamelCase : Tuple = "|".join(sys.argv[1:])
UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316
| 1
|
"""simple docstring"""
def A ( ) -> Any:
__UpperCamelCase = []
__UpperCamelCase = 1
while len(snake_case ) < 1e6:
constant.append(str(snake_case ) )
i += 1
__UpperCamelCase = ''.join(snake_case )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 316
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_pad
__UpperCamelCase = pad_size
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase )
__UpperCamelCase = (old_height // size + 1) * size - old_height
__UpperCamelCase = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_pad if do_pad is not None else self.do_pad
__UpperCamelCase = pad_size if pad_size is not None else self.pad_size
__UpperCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_pad:
__UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 316
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def A ( snake_case :str ) -> List[str]:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCamelCase = model_type_to_module_name(snake_case )
__UpperCamelCase = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(snake_case , snake_case )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case , '__name__' , snake_case ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCamelCase = importlib.import_module('transformers' )
if hasattr(snake_case , snake_case ):
return getattr(snake_case , snake_case )
return None
def A ( snake_case :Union[str, os.PathLike] , snake_case :Optional[Union[str, os.PathLike]] = None , snake_case :bool = False , snake_case :bool = False , snake_case :Optional[Dict[str, str]] = None , snake_case :Optional[Union[bool, str]] = None , snake_case :Optional[str] = None , snake_case :bool = False , **snake_case :List[str] , ) -> List[Any]:
__UpperCamelCase = get_file_from_repo(
snake_case , snake_case , cache_dir=snake_case , force_download=snake_case , resume_download=snake_case , proxies=snake_case , use_auth_token=snake_case , revision=snake_case , local_files_only=snake_case , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(snake_case , encoding='utf-8' ) as reader:
return json.load(snake_case )
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(__UpperCAmelCase )
def UpperCAmelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.pop('config' , __UpperCAmelCase )
__UpperCamelCase = kwargs.pop('trust_remote_code' , __UpperCAmelCase )
__UpperCamelCase = True
__UpperCamelCase , __UpperCamelCase = FeatureExtractionMixin.get_feature_extractor_dict(__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = config_dict.get('feature_extractor_type' , __UpperCAmelCase )
__UpperCamelCase = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__UpperCamelCase = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# It could be in `config.feature_extractor_type``
__UpperCamelCase = getattr(__UpperCAmelCase , 'feature_extractor_type' , __UpperCAmelCase )
if hasattr(__UpperCAmelCase , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
__UpperCamelCase = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
__UpperCamelCase = feature_extractor_class_from_name(__UpperCAmelCase )
__UpperCamelCase = feature_extractor_auto_map is not None
__UpperCamelCase = feature_extractor_class is not None or type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
__UpperCamelCase = resolve_trust_remote_code(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if has_remote_code and trust_remote_code:
__UpperCamelCase = get_class_from_dynamic_module(
__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = kwargs.pop('code_revision' , __UpperCAmelCase )
if os.path.isdir(__UpperCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
__UpperCamelCase = FEATURE_EXTRACTOR_MAPPING[type(__UpperCAmelCase )]
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
raise ValueError(
F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
| 316
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 13
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 99
__UpperCamelCase = 0
__UpperCamelCase = 32
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 512
__UpperCamelCase = 16
__UpperCamelCase = 2
__UpperCamelCase = 0.0_2
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__UpperCAmelCase )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : int = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=None , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = ViTMSNModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = ViTMSNForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = ViTMSNForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowercase = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ViTMSNModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = ViTMSNModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> Optional[int]:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(2 )
__UpperCamelCase = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "convbert"
def __init__( self , __UpperCAmelCase=3_0522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=768 , __UpperCAmelCase=2 , __UpperCAmelCase=9 , __UpperCAmelCase=1 , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = embedding_size
__UpperCamelCase = head_ratio
__UpperCamelCase = conv_kernel_size
__UpperCamelCase = num_groups
__UpperCamelCase = classifier_dropout
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 316
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.