| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    # no projection needed when embedding and projection sizes match
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
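A minimal smoke test of the layer above (hypothetical sizes; inputs follow the (seq_len, batch, dim) layout implied by the "ibd" einsums):

# Vocab of 10 ids split into a 5-token head plus one tail cluster; weights start at zero.
layer = TFAdaptiveSoftmaxMask(vocab_size=10, d_embed=8, d_proj=8, cutoffs=[5], div_val=1)
hidden = tf.zeros((3, 2, 8))               # (seq_len, batch, d_proj)
target = tf.zeros((3, 2), dtype=tf.int64)  # token ids in [0, vocab_size)
logprob = layer(hidden, target, return_mean=True)  # -> (3, 2, 10) log-probabilities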
| 708
|
"""
Project Euler Problem 6: https://projecteuler.net/problem=6

Find the difference between the square of the sum and the sum of the squares
of the first n natural numbers.
"""
import math


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 646
| 0
|
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
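The <0x..> pieces in the expected tokens come from SentencePiece byte fallback: characters with no dedicated piece in the small test vocab are emitted as raw UTF-8 bytes.

# "é" falls back to its two UTF-8 bytes, "9" to one:
assert "é".encode("utf-8") == b"\xc3\xa9"  # -> "<0xC3>", "<0xA9>"
assert "9".encode("utf-8") == b"\x39"      # -> "<0x39>" inside "92000"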
| 646
| 0
|
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only supports the v1 config
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
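Example invocation and follow-up load (paths and the script filename are illustrative, not fixed by the snippet):

# python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("./vae-diffusers")  # loads the converted weights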
| 710
|
def is_even(number: int) -> bool:
    """
    Return True if `number` is even, False otherwise.

    >>> is_even(4)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
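The bitwise test works because the least-significant bit of an integer encodes its parity, and Python's arbitrary-precision integers preserve this for negative values too:

# 6 == 0b110 -> 6 & 1 == 0 (even); 7 == 0b111 -> 7 & 1 == 1 (odd)
assert is_even(6) and not is_even(7)
assert is_even(-2) and not is_even(-3)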
| 646
| 0
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
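A toy trace of the thresholding above, with made-up head scores rather than real model outputs:

import torch

scores = torch.tensor([0.2, 0.8])  # hypothetical p_head outputs for two images
flags = (scores > 0.5).tolist()    # -> [False, True]: only the second image gets zeroed out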
| 711
|
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42)
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 646
| 0
|
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy and spaCy installed (different text-cleaning path)."""

    pass
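Tracing the toy merge table explains the expected tokens in test_full_tokenizer: BPE repeatedly applies the highest-priority merge, so "lower" collapses as sketched below.

# l o w e r</w> -> ("l o")     lo w e r</w>
#               -> ("lo w")    low e r</w>
#               -> ("e r</w>") low er</w>
# hence tokenize("lower") == ["low", "er</w>"], i.e. ids [14, 15] in the vocab above.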
| 712
|
import inspect
from typing import Callable, List, Optional, Union

import torch

from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
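A sketch of wiring this community pipeline up; the model ids and the custom_pipeline name follow the diffusers community examples but should be treated as assumptions:

import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
audio = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[3]["audio"]

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",  # resolves to the SpeechToImagePipeline above
    speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device),
    speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
).to(device)

image = pipe(audio["array"], sampling_rate=audio["sampling_rate"]).images[0]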
| 646
| 0
|
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 713
|
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 646
| 0
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , __lowercase , )
class lowerCAmelCase ( __lowercase ):
_lowerCamelCase : str = RobertaConfig
_lowerCamelCase : Dict = 'roberta'
def __init__( self , snake_case__ ):
super().__init__(__A )
lowerCAmelCase : List[Any] = RobertaEmbeddings(__A )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. """ , __lowercase , )
class lowerCAmelCase ( __lowercase ):
_lowerCamelCase : int = RobertaConfig
_lowerCamelCase : Tuple = 'roberta'
def __init__( self , snake_case__ ):
super().__init__(__A )
lowerCAmelCase : Union[str, Any] = config.num_labels
lowerCAmelCase : Any = config.num_hidden_layers
lowerCAmelCase : List[Any] = DeeRobertaModel(__A )
lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__A )
def lowercase ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=-1 , snake_case__=False , ):
lowerCAmelCase : Optional[Any] = self.num_layers
try:
lowerCAmelCase : Any = self.roberta(
__A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , )
lowerCAmelCase : Dict = outputs[1]
lowerCAmelCase : Any = self.dropout(__A )
lowerCAmelCase : Union[str, Any] = self.classifier(__A )
lowerCAmelCase : int = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase : Any = e.message
lowerCAmelCase : Any = e.exit_layer
lowerCAmelCase : Dict = outputs[0]
if not self.training:
lowerCAmelCase : Dict = entropy(__A )
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Dict = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Union[str, Any] = MSELoss()
lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : str = CrossEntropyLoss()
lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCAmelCase : str = []
for highway_exit in outputs[-1]:
lowerCAmelCase : int = highway_exit[0]
if not self.training:
highway_logits_all.append(__A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Any = MSELoss()
lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : List[str] = CrossEntropyLoss()
lowerCAmelCase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__A )
if train_highway:
lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase : Union[str, Any] = (loss,) + outputs
if not self.training:
lowerCAmelCase : Any = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase : Optional[int] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
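# Note (added for clarity, not part of the original file): each "highway exit"
# is an intermediate classifier attached to one encoder layer. At inference
# time the entropy of a highway's logits can be used to stop early; during
# training, `train_highway` switches the loss to the sum of all highway losses
# except the final one, since the last layer is already covered by the regular
# classification loss.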
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """xmod"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=("en_XX",) , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Optional[Any] = use_cache
lowerCAmelCase : Union[str, Any] = classifier_dropout
lowerCAmelCase : int = pre_norm
lowerCAmelCase : Optional[Any] = adapter_reduction_factor
lowerCAmelCase : Any = adapter_layer_norm
lowerCAmelCase : Dict = adapter_reuse_layer_norm
lowerCAmelCase : Any = ln_before_adapter
lowerCAmelCase : Optional[Any] = list(snake_case__ )
lowerCAmelCase : List[Any] = default_language
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
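# Sketch (illustrative, not part of the original file): for the default task
# the property above resolves to
#     {"input_ids": {0: "batch", 1: "sequence"},
#      "attention_mask": {0: "batch", 1: "sequence"}}
# with an extra "choice" axis at position 1 only for multiple-choice models.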
| 646
| 0
|
'''simple docstring'''
from __future__ import annotations
_lowerCAmelCase : Optional[int] = []
def __UpperCamelCase ( _A : list[list[int]] , _A : int , _A : int ) -> Optional[Any]:
"""simple docstring"""
for i in range(len(UpperCamelCase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase__ , -1 , -1 ) , range(UpperCamelCase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase__ , -1 , -1 ) , range(UpperCamelCase__ , len(UpperCamelCase__ ) ) ):
if board[i][j] == 1:
return False
return True
def __UpperCamelCase ( _A : list[list[int]] , _A : int ) -> List[Any]:
"""simple docstring"""
if row >= len(UpperCamelCase__ ):
solution.append(UpperCamelCase__ )
printboard(UpperCamelCase__ )
print()
return True
for i in range(len(UpperCamelCase__ ) ):
if is_safe(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCAmelCase : Any = 1
solve(UpperCamelCase__ , row + 1 )
lowerCAmelCase : List[Any] = 0
return False
def __UpperCamelCase ( _A : list[list[int]] ) -> Optional[int]:
"""simple docstring"""
for i in range(len(UpperCamelCase__ ) ):
for j in range(len(UpperCamelCase__ ) ):
if board[i][j] == 1:
print('Q' , end=' ' )
else:
print('.' , end=' ' )
print()
# n=int(input("The no. of queens"))
_lowerCAmelCase : Optional[Any] = 8
_lowerCAmelCase : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
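# Usage sketch (illustrative, not part of the original file): for n = 8 the
# backtracking above prints each complete placement as it is found; the
# classic eight-queens puzzle has 92 distinct solutions, so the final count
# printed should be 92.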
| 715
|
'''simple docstring'''
import argparse
import os
import re
_lowerCAmelCase : Dict = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_lowerCAmelCase : str = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_lowerCAmelCase : Any = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCAmelCase : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCAmelCase : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCAmelCase : Optional[Any] = re.compile(r'\[([^\]]+)\]')
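# Illustrative matches for the patterns above (a sketch, not part of the
# original file), using the names they are referenced by later in this module:
#
#     _re_direct_key:      '    "models": ['                        -> "models"
#     _re_indirect_key:    '_import_structure["models"].append('    -> "models"
#     _re_strip_line:      '    "AutoModel",'                       -> "AutoModel"
#     _re_bracket_content: '["a", "b"]'                             -> '"a", "b"'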
def __UpperCamelCase ( _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Any = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
lowerCAmelCase : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : int = []
else:
blocks.append('\n'.join(_A ) )
lowerCAmelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
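# Sketch (illustrative, not part of the original file): with the default
# indent_level of "" this splits a module into top-level chunks, so an
# `_import_structure = { ... }` literal comes back as a single block, and
# anything between `start_prompt` and `end_prompt` is isolated so that only
# that region is re-sorted by the callers below.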
def __UpperCamelCase ( _A : Dict ) -> List[Any]:
"""simple docstring"""
def _inner(_A : Tuple ):
return key(_A ).lower().replace('_' , '' )
return _inner
def __UpperCamelCase ( _A : Union[str, Any] , _A : Any=None ) -> Optional[Any]:
"""simple docstring"""
def noop(_A : Any ):
return _A
if key is None:
lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : str = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[str] = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_A )[0].isupper()]
lowerCAmelCase : Tuple = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
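# Example ordering (a sketch, not part of the original file): constants first,
# then classes, then functions, each group alphabetically while ignoring
# underscores:
#
#     sort_objects(["load_model", "CONFIG_NAME", "AutoModel", "_helper"])
#     # -> ["CONFIG_NAME", "AutoModel", "_helper", "load_model"]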
def __UpperCamelCase ( _A : Union[str, Any] ) -> int:
"""simple docstring"""
def _replace(_A : List[Any] ):
lowerCAmelCase : List[Any] = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase : Dict = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_A )] ) + "]"
lowerCAmelCase : Optional[int] = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(_A , key=lambda x : x[1] )
lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : int = keys[:-1]
lowerCAmelCase : Tuple = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
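# Example (a sketch, not part of the original file): a one-line entry such as
#
#     '"models": ["ZModel", "AModel"]'
#
# goes through the final branch and is rewritten with its bracket contents
# sorted, i.e. '"models": ["AModel", "ZModel"]'.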
def __UpperCamelCase ( _A : str , _A : Tuple=True ) -> Optional[Any]:
"""simple docstring"""
with open(_A , 'r' ) as f:
lowerCAmelCase : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[Any] = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : List[str] = main_blocks[block_idx]
lowerCAmelCase : Union[str, Any] = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase : Optional[Any] = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
lowerCAmelCase : str = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Union[str, Any] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : int = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
lowerCAmelCase : List[Any] = [x[0] for x in sorted(_A , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = []
for i in range(len(_A ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : str = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(_A , 'w' ) as f:
f.write('\n'.join(_A ) )
def __UpperCamelCase ( _A : Tuple=True ) -> Any:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
lowerCAmelCase : Any = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(F"Would overwrite {len(_A )} files, run `make style`." )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 646
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , snake_case__=False , ):
lowerCAmelCase : Optional[Any] = size if size is not None else {'height': 20, 'width': 20}
lowerCAmelCase : int = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : Tuple = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : str = image_size
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Tuple = max_resolution
lowerCAmelCase : str = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : Optional[int] = do_center_crop
lowerCAmelCase : int = crop_size
lowerCAmelCase : List[Any] = do_normalize
lowerCAmelCase : Dict = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[Any] = do_reduce_labels
def lowercase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
lowerCAmelCase : Tuple = Image.open(dataset[0]['file'] )
lowerCAmelCase : int = Image.open(dataset[1]['file'] )
return image, map
def __UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : List[Any] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
lowerCAmelCase : List[Any] = Image.open(ds[0]['file'] )
lowerCAmelCase : Tuple = Image.open(ds[1]['file'] )
lowerCAmelCase : Tuple = Image.open(ds[2]['file'] )
lowerCAmelCase : Dict = Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
_lowerCamelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def lowercase ( self ):
lowerCAmelCase : Tuple = BeitImageProcessingTester(self )
@property
def lowercase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self ):
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_center_crop' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'center_crop' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_std' ) )
def lowercase ( self ):
lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE )
def lowercase ( self ):
pass
def lowercase ( self ):
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Tuple = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self ):
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = []
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
lowerCAmelCase : List[Any] = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
lowerCAmelCase , lowerCAmelCase : Optional[int] = prepare_semantic_single_inputs()
lowerCAmelCase : Union[str, Any] = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
lowerCAmelCase , lowerCAmelCase : Tuple = prepare_semantic_batch_inputs()
lowerCAmelCase : Tuple = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowerCAmelCase , lowerCAmelCase : List[Any] = prepare_semantic_single_inputs()
lowerCAmelCase : int = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 150 )
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
| 716
|
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Tuple = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = embedding_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : int = scope
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , next_sentence_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[Any] = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : int = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Any = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = config_and_inputs
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : str = True
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : int = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
lowerCAmelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def lowercase ( self ):
lowerCAmelCase : List[Any] = MobileBertModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return torch.tensor(
_A , dtype=torch.long , device=torch_device , )
_lowerCAmelCase : Union[str, Any] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(snake_case__ )
lowerCAmelCase : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase : Tuple = model(snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=snake_case__ , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCAmelCase : List[str] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCAmelCase : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 646
| 0
|
'''simple docstring'''
def __UpperCamelCase ( _A : str ) -> str:
"""simple docstring"""
lowerCAmelCase : Any = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
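# Example (a sketch, not part of the original file): spaces are always kept,
# and a repeated letter is dropped on every occurrence after the first:
#
#     remove_duplicates("HELLO WORLD")  # -> "HELO WRD"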
def __UpperCamelCase ( _A : str ) -> dict[str, str]:
"""simple docstring"""
lowerCAmelCase : Tuple = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
lowerCAmelCase : str = remove_duplicates(key.upper() )
lowerCAmelCase : Any = len(_A )
# First fill cipher with key characters
lowerCAmelCase : Tuple = {alphabet[i]: char for i, char in enumerate(_A )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_A ) , 26 ):
lowerCAmelCase : Any = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowerCAmelCase : int = alphabet[i - offset]
lowerCAmelCase : int = char
return cipher_alphabet
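# Sketch of the resulting substitution alphabet (illustrative, not part of the
# original file): the first entries follow the de-duplicated key and the rest
# wrap around the alphabet while skipping key letters, e.g. with the key
# "HELLO WORLD" the map begins A->H, B->E, C->L, D->O, E->" ", F->W, ...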
def __UpperCamelCase ( _A : str , _A : dict[str, str] ) -> str:
"""simple docstring"""
return "".join(cipher_map.get(_A , _A ) for ch in message.upper() )
def __UpperCamelCase ( _A : str , _A : dict[str, str] ) -> str:
"""simple docstring"""
lowerCAmelCase : List[Any] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_A , _A ) for ch in message.upper() )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Any = input('Enter message to encode or decode: ' ).strip()
lowerCAmelCase : str = input('Enter keyword: ' ).strip()
lowerCAmelCase : List[Any] = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
lowerCAmelCase : Any = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
lowerCAmelCase : Optional[int] = create_cipher_map(_A )
print(func(_A , _A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 717
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( _A : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( _A : List[Any] , _A : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
lowerCAmelCase : List[Any] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __UpperCamelCase ( _A : str , _A : Optional[Any] , _A : Dict , _A : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : List[str] = 'imagenet-1k-id2label.json'
lowerCAmelCase : Tuple = 10_00
lowerCAmelCase : str = 'huggingface/label-files'
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase : List[str] = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
lowerCAmelCase : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCAmelCase : List[str] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCAmelCase : Tuple = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase : Any = [2, 2, 20]
lowerCAmelCase : List[str] = [3, 12, 16]
lowerCAmelCase : List[Any] = [1_92, 7_68, 10_24]
lowerCAmelCase : Union[str, Any] = CvtForImageClassification(_A )
lowerCAmelCase : str = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCAmelCase : Optional[Any] = image_size
    lowerCAmelCase : List[Any] = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    lowerCAmelCase : str = OrderedDict()
    lowerCAmelCase : int = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            lowerCAmelCase : List[str] = list_of_state_dict + cls_token(idx )
        lowerCAmelCase : Optional[Any] = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            lowerCAmelCase : List[Any] = list_of_state_dict + attention(idx , cnt )
    lowerCAmelCase : List[str] = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        lowerCAmelCase : Tuple = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
    parser.add_argument(
        '--cvt_file_name',
        default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Path to the original CvT checkpoint (.pth) file.',
    )
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
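# Example invocation (script name and paths are hypothetical):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-converted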
| 646
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , ):
lowerCAmelCase : Union[str, Any] = size if size is not None else {'shortest_edge': 18}
lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : str = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Dict = image_size
lowerCAmelCase : List[Any] = min_resolution
lowerCAmelCase : Union[str, Any] = max_resolution
lowerCAmelCase : List[str] = do_resize
lowerCAmelCase : Tuple = size
lowerCAmelCase : Dict = do_center_crop
lowerCAmelCase : List[str] = crop_size
lowerCAmelCase : List[Any] = do_normalize
lowerCAmelCase : str = image_mean
lowerCAmelCase : Optional[int] = image_std
def lowercase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[str] = LevitImageProcessor if is_vision_available() else None
def lowercase ( self ):
lowerCAmelCase : Any = LevitImageProcessingTester(self )
@property
def lowercase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self ):
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'image_mean' ) )
self.assertTrue(hasattr(__A , 'image_std' ) )
self.assertTrue(hasattr(__A , 'do_normalize' ) )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'do_center_crop' ) )
self.assertTrue(hasattr(__A , 'size' ) )
def lowercase ( self ):
lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase ( self ):
pass
def lowercase ( self ):
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self ):
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self ):
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Any = image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
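# Minimal usage sketch (not part of the test suite): the processor under test,
# constructed directly with the same size/crop settings exercised above, so no
# checkpoint download is needed. Guarded so it only runs when executed directly.
if __name__ == "__main__" and is_vision_available() and is_torch_available():
    _processor = LevitImageProcessor(size={'shortest_edge': 18} , crop_size={'height': 18, 'width': 18} )
    _pixel_values = _processor(Image.new('RGB' , (64, 64) ) , return_tensors='pt' ).pixel_values
    print(_pixel_values.shape )  # torch.Size([1, 3, 18, 18])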
| 718
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCAmelCase ( a ):
_lowerCamelCase : List[str] = """xlm-roberta"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : List[Any] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 646
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=__snake_case ):
_lowerCamelCase : Tuple = ["""keras_nlp"""]
def __init__( self , *snake_case__ , **snake_case__ ):
requires_backends(self , ['keras_nlp'] )
| 719
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name' , type=str , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
    parser.add_argument(
        '--dataset_config' , type=str , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path' , type=str , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
    parser.add_argument(
        '--shard_size' , type=int , default=10_00 , help='Number of entries to go in a single shard.' , )
    parser.add_argument('--split' , type=str , default='train' , choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit' , default=None , type=int , help='Limit the number of shards (used for debugging).' , )
    parser.add_argument(
        '--max_length' , type=int , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.' , )
    parser.add_argument(
        '--output_dir' , default='tf-tpu' , type=str , help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( tokenizer : Optional[int] ) -> int:
    """Returns a closure that tokenizes the `text` column of a batch of examples."""
    def fn(examples : Tuple ):
        return tokenizer(examples['text'] )
    return fn
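# Quick sketch of the closure above (illustrative; `fn` can be handed straight to
# `datasets.Dataset.map(..., batched=True)`):
#   tok = AutoTokenizer.from_pretrained('sayakpaul/unigram-tokenizer-wikitext')
#   fn = tokenize_function(tok)  # descriptive name for the obfuscated function above
#   fn({'text': ['hello world']})  # -> {'input_ids': [...], 'attention_mask': [...]}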
def __UpperCamelCase ( tokenized_data : int ) -> int:
    """Serializes the tokenized examples into `tf.train.Example` records."""
    lowerCAmelCase : Tuple = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        lowerCAmelCase : Optional[Any] = {
            'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
        }
        lowerCAmelCase : Any = tf.train.Features(feature=features )
        lowerCAmelCase : List[str] = tf.train.Example(features=features )
        lowerCAmelCase : Tuple = example.SerializeToString()
        records.append(record_bytes )
    return records
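# Round-trip sketch (illustrative, not part of the original script): parsing one of
# the records produced above back into tensors. A VarLen spec matches the
# variable-length int64 features written by the function.
def parse_record(record_bytes):
    feature_spec = {
        'input_ids': tf.io.VarLenFeature(tf.int64),
        'attention_mask': tf.io.VarLenFeature(tf.int64),
    }
    return tf.io.parse_single_example(record_bytes, feature_spec)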
def __UpperCamelCase ( args : int ) -> Union[str, Any]:
    """Tokenizes the dataset, groups it into fixed-length chunks, and writes TFRecord shards."""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
        lowerCAmelCase : Optional[Any] = min(len(dataset ) , args.limit )
        lowerCAmelCase : Dict = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
        lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
    lowerCAmelCase : Any = tokenize_function(tokenizer )
    lowerCAmelCase : Optional[int] = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_A : str ):
# Concatenate all texts.
lowerCAmelCase : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
        lowerCAmelCase : str = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
return result
    lowerCAmelCase : List[Any] = dataset_tokenized.map(group_texts , batched=True , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
        lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
        lowerCAmelCase : Union[str, Any] = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        lowerCAmelCase : List[Any] = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                lowerCAmelCase : Union[str, Any] = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename , records_containing ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
| 646
| 0
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowerCAmelCase ( a , a , a , unittest.TestCase ):
_lowerCamelCase : int = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
_lowerCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
lowerCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : Tuple = CLIPTextModel(A__ )
lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : Tuple = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(A__ ).startswith('mps' ):
lowerCAmelCase : Union[str, Any] = torch.manual_seed(A__ )
else:
lowerCAmelCase : Optional[Any] = torch.Generator(device=A__ ).manual_seed(A__ )
lowerCAmelCase : Dict = 2
lowerCAmelCase : Tuple = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , )
lowerCAmelCase : List[str] = floats_tensor(control_image.shape , rng=random.Random(A__ ) ).to(A__ )
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : str = Image.fromarray(np.uinta(A__ ) ).convert('RGB' ).resize((64, 64) )
lowerCAmelCase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowercase ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def lowercase ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : Dict = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowerCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase : Tuple = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase : Optional[int] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(A__ )
torch.manual_seed(0 )
lowerCAmelCase : Tuple = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(A__ )
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
lowerCAmelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : Any = CLIPTextModel(A__ )
lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase : Any = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(A__ ).startswith('mps' ):
lowerCAmelCase : List[Any] = torch.manual_seed(A__ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=A__ ).manual_seed(A__ )
lowerCAmelCase : List[str] = 2
lowerCAmelCase : Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ),
]
lowerCAmelCase : List[Any] = floats_tensor(control_image[0].shape , rng=random.Random(A__ ) ).to(A__ )
lowerCAmelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(A__ ) ).convert('RGB' ).resize((64, 64) )
lowerCAmelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowercase ( self ):
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : Tuple = self.pipeline_class(**A__ )
pipe.to(A__ )
lowerCAmelCase : List[str] = 10.0
lowerCAmelCase : Union[str, Any] = 4
lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(A__ )
lowerCAmelCase : str = steps
lowerCAmelCase : Dict = scale
lowerCAmelCase : Optional[Any] = pipe(**A__ )[0]
lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(A__ )
lowerCAmelCase : Dict = steps
lowerCAmelCase : Dict = scale
lowerCAmelCase : List[str] = pipe(**A__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase : str = self.get_dummy_inputs(A__ )
lowerCAmelCase : Dict = steps
lowerCAmelCase : Optional[int] = scale
lowerCAmelCase : Dict = pipe(**A__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase : List[str] = self.get_dummy_inputs(A__ )
lowerCAmelCase : List[str] = steps
lowerCAmelCase : Optional[int] = scale
lowerCAmelCase : int = pipe(**A__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def lowercase ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def lowercase ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.get_dummy_components()
lowerCAmelCase : Optional[int] = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(A__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ):
lowerCAmelCase : Dict = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
lowerCAmelCase : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=A__ , controlnet=A__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A__ )
lowerCAmelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase : List[Any] = """evil space-punk bird"""
lowerCAmelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
lowerCAmelCase : List[str] = pipe(
A__ , A__ , control_image=A__ , generator=A__ , output_type='np' , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
| 720
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( checkpoint : Any , hf_model : Dict , config : Any ) -> Union[str, Any]:
    """Copies the original HiFi-GAN generator weights into the HuggingFace model."""
hf_model.apply_weight_norm()
lowerCAmelCase : int = checkpoint['input_conv.weight_g']
lowerCAmelCase : Optional[int] = checkpoint['input_conv.weight_v']
lowerCAmelCase : Dict = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase : Optional[Any] = checkpoint[F"upsamples.{i}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.weight_v"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
lowerCAmelCase : List[Any] = checkpoint['output_conv.1.weight_g']
lowerCAmelCase : List[str] = checkpoint['output_conv.1.weight_v']
lowerCAmelCase : Optional[Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( checkpoint_path : Dict , stats_path : Union[str, Any] , pytorch_dump_folder_path : List[Any] , config_path : Any=None , repo_id : Any=None , ) -> Dict:
    """Converts an original HiFi-GAN vocoder checkpoint into a SpeechTaHifiGan checkpoint."""
    if config_path is not None:
        lowerCAmelCase : Dict = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        lowerCAmelCase : Union[str, Any] = SpeechTaHifiGanConfig()
    lowerCAmelCase : List[Any] = SpeechTaHifiGan(config )
    lowerCAmelCase : List[str] = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    lowerCAmelCase : Tuple = np.load(stats_path )
    lowerCAmelCase : List[Any] = stats[0].reshape(-1 )
    lowerCAmelCase : int = stats[1].reshape(-1 )
    lowerCAmelCase : Union[str, Any] = torch.from_numpy(mean ).float()
    lowerCAmelCase : int = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
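# Example invocation (script name and paths are hypothetical):
#   python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
#       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-user/speecht5-hifigan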
| 646
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_lowerCamelCase : str = 'lxmert'
_lowerCamelCase : str = {}
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=9500 , snake_case__=1600 , snake_case__=400 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=9 , snake_case__=5 , snake_case__=5 , snake_case__=2048 , snake_case__=4 , snake_case__=6.6_7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , **snake_case__ , ):
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : List[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = max_position_embeddings
lowerCAmelCase : Any = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Tuple = num_qa_labels
lowerCAmelCase : List[str] = num_object_labels
lowerCAmelCase : int = num_attr_labels
lowerCAmelCase : int = l_layers
lowerCAmelCase : List[Any] = x_layers
lowerCAmelCase : List[str] = r_layers
lowerCAmelCase : Union[str, Any] = visual_feat_dim
lowerCAmelCase : Tuple = visual_pos_dim
lowerCAmelCase : str = visual_loss_normalizer
lowerCAmelCase : List[str] = task_matched
lowerCAmelCase : Optional[Any] = task_mask_lm
lowerCAmelCase : Union[str, Any] = task_obj_predict
lowerCAmelCase : Tuple = task_qa
lowerCAmelCase : List[Any] = visual_obj_loss
lowerCAmelCase : List[str] = visual_attr_loss
lowerCAmelCase : Dict = visual_feat_loss
lowerCAmelCase : Any = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**snake_case__ )
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
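# Sketch of the reference transposition performed in `_compute` above (illustrative):
# sacrebleu wants one list per reference position, not one list per prediction.
if __name__ == "__main__":
    refs_per_pred = [['ref 1a', 'ref 1b'], ['ref 2a', 'ref 2b']]  # one sub-list per prediction
    transposed = [[refs[i] for refs in refs_per_pred] for i in range(len(refs_per_pred[0]))]
    print(transposed)  # [['ref 1a', 'ref 2a'], ['ref 1b', 'ref 2b']]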
| 646
| 0
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
_lowerCAmelCase : str = 50 # max width of layer names
_lowerCAmelCase : int = 70 # max width of quantizer names
def __UpperCamelCase ( parser : Optional[int] ) -> Tuple:
    """Registers quant_trainer's command-line arguments on the given parser."""
lowerCAmelCase : Dict = parser.add_argument_group('quant_trainer arguments' )
    group.add_argument('--wprec' , type=int , default=8 , help='weight precision' )
    group.add_argument('--aprec' , type=int , default=8 , help='activation precision' )
    group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
    group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
    group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
    group.add_argument('--quant-disable-keyword' , type=str , nargs='+' , help='disable quantizers by keyword' )
    group.add_argument('--quant-disable-layer-module' , type=str , help='disable quantizers by keyword under layer.' )
    group.add_argument('--quant-enable-layer-module' , type=str , help='enable quantizers by keyword under layer' )
    group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
    group.add_argument('--percentile' , default=None , type=float , help='percentile for PercentileCalibrator' )
    group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
    group.add_argument('--clip-gelu' , metavar='N' , type=float , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def __UpperCamelCase ( args : str ) -> Any:
    """Selects the calibration method and installs the default quantization descriptors."""
if args.calibrator == "max":
lowerCAmelCase : Optional[int] = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
lowerCAmelCase : Any = 'histogram'
elif args.calibrator == "mse":
lowerCAmelCase : Tuple = 'histogram'
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
    lowerCAmelCase : Optional[int] = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    lowerCAmelCase : Tuple = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
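# Illustrative wiring of the two helpers above (the obfuscated names stand in for
# something like `add_quant_arguments` and `set_default_quantizers`; both names are
# descriptive guesses):
#   parser = argparse.ArgumentParser()
#   add_quant_arguments(parser)      # registers the --wprec/--aprec/... group
#   args = parser.parse_args()
#   set_default_quantizers(args)     # installs the QuantDescriptors chosen above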
def __UpperCamelCase ( model : List[str] , args : Union[str, Any] , calib : bool=False , eval : bool=False ) -> Optional[Any]:
    """Configures the model's quantizers according to the parsed quant_trainer arguments."""
    logger.info('Configuring Model for Quantization' )
    logger.info(F"using quantization package {pytorch_quantization.__file__}" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def __UpperCamelCase ( model : str ) -> str:
    """Puts every TensorQuantizer in the model into calibration mode."""
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def __UpperCamelCase ( model : Tuple , args : List[Any] ) -> List[str]:
    """Loads the calibrated amax values and re-enables quantization."""
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def __UpperCamelCase ( model : Optional[int] , args : Dict ) -> str:
    """Shares a single amax scale factor across the fused q/k/v input quantizers."""
    def fusea(qq : int , qk : str , qv : List[str] ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print(' WARNING: NO AMAX BUFFER' )
                return
        lowerCAmelCase : Union[str, Any] = qq._amax.detach().item()
        lowerCAmelCase : Optional[int] = qk._amax.detach().item()
        lowerCAmelCase : Tuple = qv._amax.detach().item()
        lowerCAmelCase : List[str] = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def __UpperCamelCase ( model : Any , maxval : Dict ) -> List[str]:
    """Clamps the input-quantizer amax of the MLP output layers to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            lowerCAmelCase : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
lowerCAmelCase : int = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def __UpperCamelCase ( model : int ) -> int:
    """Expands per-tensor amax values to per-channel for axis-quantized weights."""
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            lowerCAmelCase : Optional[Any] = mod.weight.shape[0]
            lowerCAmelCase : Dict = mod._weight_quantizer._amax.detach()
            lowerCAmelCase : Optional[Any] = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def __UpperCamelCase ( model : Dict ) -> List[Any]:
    """Recomputes weight amaxes from the current weights at the configured granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCAmelCase : Optional[int] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCAmelCase : int = set(range(len(mod.weight.size() ) ) ) - axis_set
            lowerCAmelCase : Any = pytorch_quantization.utils.reduce_amax(mod.weight , axis=axis , keepdims=True ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
lowerCAmelCase : Dict = amax
def __UpperCamelCase ( model : Optional[int] , name_width : Any=25 , line_width : Optional[int]=1_80 , ignore : Any=None ) -> Any:
    """Prints a per-layer summary of the input/weight quantizer configuration."""
    if ignore is None:
        lowerCAmelCase : List[str] = []
    elif not isinstance(ignore , list ):
        lowerCAmelCase : int = [ignore]
    lowerCAmelCase : List[str] = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        lowerCAmelCase : Union[str, Any] = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        lowerCAmelCase : int = getattr(mod , '_input_quantizer' , None )
        lowerCAmelCase : Dict = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        lowerCAmelCase : Any = F"Act:{input_q.extra_repr()}"
        lowerCAmelCase : int = F"Wgt:{weight_q.extra_repr()}"
        lowerCAmelCase : Optional[int] = F"{name:{name_width}} {act_str} {wgt_str}"
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F"{name:{name_width}} {act_str}" )
            logger.info(F"{' ':{name_width}} {wgt_str}" )
def __UpperCamelCase ( model : Optional[Any] ) -> Any:
    """Counts and prints every TensorQuantizer module in the model."""
    lowerCAmelCase : Optional[Any] = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def __UpperCamelCase ( name : Tuple , mod : List[str] , quantizer : str , k : str , v : str ) -> Dict:
    """Sets attribute `k` to `v` on the given quantizer of `mod`."""
    lowerCAmelCase : Optional[int] = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F"{name} has no {quantizer}" )
def __UpperCamelCase ( name : List[str] , mod : Any , which : Union[str, Any]="both" , **kwargs : Any ) -> str:
    """Sets the given attributes on the input and/or weight quantizers of `mod`."""
    lowerCAmelCase : int = F"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += F" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name , mod , '_input_quantizer' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '_weight_quantizer' , k , v )
    logger.info(s )
def __UpperCamelCase ( model : Tuple , names : Union[str, Any] , **kwargs : List[Any] ) -> int:
    """Sets quantizer attributes on every module whose name matches one of `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    lowerCAmelCase : Dict = F"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += F" {k}={v}"
                        setattr(mod , k , v )
                    logger.info(s )
| 700
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Union[str, Any] = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def lowercase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}" )
lowerCAmelCase : List[Any] = self.rope_scaling.get('type' , snake_case__ )
lowerCAmelCase : List[str] = self.rope_scaling.get('factor' , snake_case__ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 646
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase : Tuple = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 701
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Any = """deformable_detr"""
_lowerCamelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0_2 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.2_5 , snake_case__=False , **snake_case__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = backbone_config.get('model_type' )
lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[Any] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Union[str, Any] = use_timm_backbone
lowerCAmelCase : List[Any] = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : int = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : List[str] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : str = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Any = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = auxiliary_loss
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : List[str] = num_feature_levels
lowerCAmelCase : List[str] = encoder_n_points
lowerCAmelCase : Union[str, Any] = decoder_n_points
lowerCAmelCase : Tuple = two_stage
lowerCAmelCase : Dict = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase : Union[str, Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Dict = mask_loss_coefficient
lowerCAmelCase : Any = dice_loss_coefficient
lowerCAmelCase : str = bbox_loss_coefficient
lowerCAmelCase : Tuple = giou_loss_coefficient
lowerCAmelCase : List[str] = eos_coefficient
lowerCAmelCase : Any = focal_alpha
lowerCAmelCase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase ( self ):
return self.encoder_attention_heads
@property
def lowercase ( self ):
return self.d_model
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase : str = self.__class__.model_type
return output
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """simple docstring"""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
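# Worked example (from the Project Euler 43 statement): 1406357289 is pandigital
# and has the sub-string divisibility property, so the check should accept it.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))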
def solution(n: int = 10) -> int:
    """simple docstring"""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : str = PegasusTokenizer
_lowerCamelCase : Union[str, Any] = PegasusTokenizerFast
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[Any] = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Optional[int] = '</s>'
lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(snake_case__ ) , 1103 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : Dict = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Optional[Any] = PegasusTokenizer
_lowerCamelCase : str = PegasusTokenizerFast
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : int = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : List[str] = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def lowercase ( self ):
lowerCAmelCase : List[str] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(num, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """simple docstring"""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """simple docstring"""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """simple docstring"""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """simple docstring"""
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def parse_args(args: list) -> tuple:
    """simple docstring"""
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
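# Minimal usage sketch (added for illustration, not in the original file): apply
# the filter to a small synthetic float image instead of an image read from disk.
def _demo() -> None:
    rng = np.random.default_rng(0)
    img_demo = rng.random((16, 16)).astype('float32')  # values already in [0, 1]
    out_demo = bilateral_filter(img_demo, 1.0, 1.0, 5)
    print(img_demo.shape, out_demo.shape)              # (16, 16) (16, 16)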
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = parse_args(sys.argv)
_lowerCAmelCase : str = cva.imread(filename, 0)
cva.imshow('input image', img)
_lowerCAmelCase : Union[str, Any] = img / 255
_lowerCAmelCase : List[str] = out.astype('float32')
_lowerCAmelCase : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_lowerCAmelCase : Union[str, Any] = out * 255
_lowerCAmelCase : Optional[Any] = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock) -> None:
    """simple docstring"""
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : int = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase ( __lowercase , __lowercase , unittest.TestCase ):
_lowerCamelCase : List[Any] = StableDiffusionPanoramaPipeline
_lowerCamelCase : List[str] = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase : int = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : List[str] = DDIMScheduler()
torch.manual_seed(0 )
lowerCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : List[Any] = CLIPTextModel(_A )
lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(_A )
lowerCAmelCase : Union[str, Any] = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : int = StableDiffusionPanoramaPipeline(**_A )
lowerCAmelCase : Dict = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase : int = self.get_dummy_inputs(_A )
lowerCAmelCase : List[Any] = sd_pipe(**_A ).images
lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Optional[Any] = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def lowercase ( self ):
lowerCAmelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase : int = StableDiffusionPanoramaPipeline(**_A )
lowerCAmelCase : Any = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase : str = self.get_dummy_inputs(_A )
lowerCAmelCase : Dict = 'french fries'
lowerCAmelCase : Optional[int] = sd_pipe(**_A , negative_prompt=_A )
lowerCAmelCase : Tuple = output.images
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Dict = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Dict = self.get_dummy_components()
lowerCAmelCase : int = StableDiffusionPanoramaPipeline(**_A )
lowerCAmelCase : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase : Tuple = self.get_dummy_inputs(_A )
lowerCAmelCase : Any = sd_pipe(**_A , view_batch_size=2 )
lowerCAmelCase : List[Any] = output.images
lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : str = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Any = self.get_dummy_components()
lowerCAmelCase : str = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**_A )
lowerCAmelCase : Optional[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase : List[str] = self.get_dummy_inputs(_A )
lowerCAmelCase : Optional[Any] = sd_pipe(**_A ).images
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : List[str] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self ):
lowerCAmelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Any = self.get_dummy_components()
lowerCAmelCase : Union[str, Any] = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , skip_prk_steps=_A )
lowerCAmelCase : int = StableDiffusionPanoramaPipeline(**_A )
lowerCAmelCase : Union[str, Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase : Any = self.get_dummy_inputs(_A )
lowerCAmelCase : Union[str, Any] = sd_pipe(**_A ).images
lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Dict = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__=0 ):
lowerCAmelCase : Optional[int] = torch.manual_seed(_A )
lowerCAmelCase : List[Any] = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = 'stabilityai/stable-diffusion-2-base'
lowerCAmelCase : str = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
lowerCAmelCase : List[str] = self.get_inputs()
lowerCAmelCase : int = pipe(**_A ).images
lowerCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCAmelCase : List[Any] = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase ( self ):
lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=_A )
lowerCAmelCase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
lowerCAmelCase : List[Any] = self.get_inputs()
lowerCAmelCase : List[Any] = pipe(**_A ).images
lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCAmelCase : int = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self ):
lowerCAmelCase : Tuple = 0
def callback_fn(snake_case__ , snake_case__ , snake_case__ ) -> None:
lowerCAmelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCAmelCase : List[Any] = latents[0, -3:, -3:, -1]
lowerCAmelCase : Optional[Any] = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCAmelCase : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCAmelCase : int = latents[0, -3:, -3:, -1]
lowerCAmelCase : Union[str, Any] = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCAmelCase : int = False
lowerCAmelCase : List[Any] = 'stabilityai/stable-diffusion-2-base'
lowerCAmelCase : Union[str, Any] = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
lowerCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
lowerCAmelCase : Optional[Any] = self.get_inputs()
pipe(**_A , callback=_A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase : List[Any] = 'stabilityai/stable-diffusion-2-base'
lowerCAmelCase : Optional[int] = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
lowerCAmelCase : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
lowerCAmelCase : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : List[str] = self.get_inputs()
lowerCAmelCase : Any = pipe(**_A )
lowerCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None
    def __repr__(self):
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None
    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self):
        return sum(1 for _ in self)
    def __repr__(self):
        return "->".join([str(item) for item in self])
    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data):
        self.insert_nth(len(self), data)
    def insert_head(self, data):
        self.insert_nth(0, data)
    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        print(self)
    def delete_head(self):
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        return self.head is None
    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(10_00, 1_00_00_00)
        if number == digits_fifth_powers_sum(number))
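# Worked example: 4150 = 4**5 + 1**5 + 5**5 + 0**5, so the search above keeps it.
assert digits_fifth_powers_sum(4150) == 4150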
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase : Optional[int] = mock.Mock()
lowerCAmelCase : Union[str, Any] = 500
lowerCAmelCase : str = {}
lowerCAmelCase : List[str] = HTTPError
lowerCAmelCase : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=a_ ) as mock_head:
lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowercase ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase : Union[str, Any] = mock.Mock()
lowerCAmelCase : Any = 500
lowerCAmelCase : int = {}
lowerCAmelCase : int = HTTPError
lowerCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase : List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=a_ ) as mock_head:
lowerCAmelCase : Optional[int] = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
lowerCAmelCase : Union[str, Any] = tempfile.mktemp()
with open(a_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , a_ )
lowerCAmelCase : Dict = AlbertTokenizer.from_pretrained(a_ )
finally:
os.remove(a_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , a_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def lowercase ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCAmelCase : Tuple = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowerCAmelCase ( unittest.TestCase ):
_lowerCamelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowercase ( cls ):
lowerCAmelCase : Tuple = TOKEN
HfFolder.save_token(a_ )
@classmethod
def lowercase ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def lowercase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : int = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase : str = BertTokenizer(a_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
lowerCAmelCase : List[Any] = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a_ , repo_id='test-tokenizer' , push_to_hub=a_ , use_auth_token=self._token )
lowerCAmelCase : str = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowercase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : List[Any] = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase : Any = BertTokenizer(a_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
lowerCAmelCase : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=a_ , use_auth_token=self._token )
lowerCAmelCase : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowercase ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Optional[int] = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase : Tuple = CustomTokenizer(a_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Optional[Any] = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase : List[Any] = BertTokenizerFast.from_pretrained(a_ )
bert_tokenizer.save_pretrained(a_ )
lowerCAmelCase : List[str] = CustomTokenizerFast.from_pretrained(a_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
f"{USER}/test-dynamic-tokenizer" , use_fast=a_ , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def lowercase ( self ):
lowerCAmelCase : int = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def lowercase ( self ):
lowerCAmelCase : Any = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def lowercase ( self ):
lowerCAmelCase : Tuple = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def lowercase ( self ):
lowerCAmelCase : List[str] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def lowercase ( self ):
lowerCAmelCase : str = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def lowercase ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
lowerCAmelCase : Dict = Trie()
lowerCAmelCase : str = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a_ , ['AB', 'C'] )
'''simple docstring'''
def is_palindrome(head) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
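# A small end-to-end check (illustrative; the original module assumes nodes with
# `val` and `next` attributes but never defines them, so `ListNode` here is a
# hypothetical minimal stand-in).
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
def _build(values):
    head = None
    for val in reversed(values):
        node = ListNode(val)
        node.next = head
        head = node
    return head
for checker in (is_palindrome, is_palindrome_stack, is_palindrome_dict):
    assert checker(_build([1, 2, 3, 2, 1])) is True
    assert checker(_build([1, 2, 3])) is False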
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    """simple docstring"""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge: float) -> float:
    """simple docstring"""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
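# Spot-check with edge = 5: surface = 3*sqrt(25 + 10*sqrt(5))*5**2 and
# volume = (15 + 7*sqrt(5))/4 * 5**3 (values rounded to 4 decimals).
assert round(dodecahedron_surface_area(5), 4) == 516.1432
assert round(dodecahedron_volume(5), 4) == 957.8899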
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
def solution(n: int = 1_00) -> int:
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
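# Worked example for n = 10: (1 + 2 + ... + 10)**2 = 55**2 = 3025 and
# 1**2 + 2**2 + ... + 10**2 = 385, so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640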
if __name__ == "__main__":
print(f"""{solution() = }""")
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase ( datasets.BeamBasedBuilder ):
def lowercase ( self ):
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=_SCREAMING_SNAKE_CASE , )
def lowercase ( self , snake_case__ , snake_case__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
def lowercase ( self , snake_case__ , snake_case__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_SCREAMING_SNAKE_CASE )
class lowerCAmelCase ( datasets.BeamBasedBuilder ):
def lowercase ( self ):
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=_SCREAMING_SNAKE_CASE , )
def lowercase ( self , snake_case__ , snake_case__ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
]
def lowercase ( self , snake_case__ , snake_case__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class lowerCAmelCase ( lowerCAmelCase__ ):
@require_beam
def lowercase ( self ):
lowerCAmelCase : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCAmelCase : Optional[Any] = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
lowerCAmelCase : str = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train-00000-of-00002.arrow')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train-00001-of-00002.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f'{builder.name}-train.arrow')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
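
# Illustrative sketch (added; not part of the original test module): the end-to-end
# flow the tests above exercise, runnable directly wherever apache-beam's
# DirectRunner is available. `DummyBeamDataset` is assumed to be the flat builder
# defined earlier in this file.
if __name__ == '__main__':
    with tempfile.TemporaryDirectory() as demo_cache_dir:
        demo_builder = DummyBeamDataset(cache_dir=demo_cache_dir, beam_runner='DirectRunner')
        demo_builder.download_and_prepare()
        print(demo_builder.as_dataset()['train'].num_rows)  # -> 3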
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]

        # fmt: off
        expected_encoding = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='AI-Sweden/gpt-sw3-126m',
            sequences=sequences,
        )
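
# Added for convenience (an assumption, not present in the original test module):
# lets the tests above be run directly with `python` instead of `pytest`.
if __name__ == '__main__':
    unittest.main()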
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>',
                 unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding <s> ... </s> around a sequence (and </s></s> between pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
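
# Minimal usage sketch (added for illustration; the model file path is an
# assumption). With a real XLM-R SentencePiece model on disk, the tokenizer maps
# text to ids that are shifted by `fairseq_offset` relative to raw SentencePiece ids.
if __name__ == '__main__':
    demo_tokenizer = XLMRobertaTokenizer(vocab_file='sentencepiece.bpe.model')
    print(demo_tokenizer('Hello world')['input_ids'])  # starts with <s> (0), ends with </s> (2)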
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if `number` is even, by checking its lowest bit.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='text-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    text_column: str = 'text'
    label_column: str = 'labels'

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
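
# Illustrative sketch (added; not part of the original module): aligning the
# template with a dataset's features replaces the generic ClassLabel in
# `label_schema` with the dataset's own label feature.
if __name__ == '__main__':
    demo_features = Features({'text': Value('string'), 'labels': ClassLabel(names=['neg', 'pos'])})
    demo_template = TextClassification(text_column='text', label_column='labels')
    print(demo_template.align_with_features(demo_features).label_schema)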
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo',
        citation='bar',
        homepage='https://foo.bar',
        license='CC0',
        features=Features({'a': Value('int32')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='builder',
        config_name='config',
        version='1.0.0',
        splits=[{'name': 'train', 'num_examples': 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
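
# Illustrative sketch (added; not part of the original test module): the YAML
# round trip the tests above rely on, shown outside pytest.
if __name__ == '__main__':
    demo_info = DatasetInfo(description='demo', features=Features({'a': Value('int32')}))
    print(demo_info._to_yaml_dict())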
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """List the (timm key, HF key) pairs used to rename the state dict."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch an image of two cats on which we will verify the conversion."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights into our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding='same', layer_type='bottleneck', depths=(3, 4, 9), out_features=['stage3'],
        embedding_dynamic_padding=True)
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == 'in21k':
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print('Predicted class:', logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and processor to the hub {vit_name}')
        model.push_to_hub(f'ybelkada/{vit_name}')
        processor.push_to_hub(f'ybelkada/{vit_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
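
    # Example invocation (illustrative; the script filename and the output path
    # are assumptions):
    #   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
    #       --pytorch_dump_folder_path ./vit-hybrid-base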
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor,
                 vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
                 unet: UNet2DConditionModel, scheduler, safety_checker: StableDiffusionSafetyChecker,
                 feature_extractor: CLIPImageProcessor):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder,
            tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor)

    def enable_attention_slicing(self, slice_size='auto'):
        if slice_size == 'auto':
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height=512, width=512, num_inference_steps=50,
                 guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None,
                 latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, **kwargs):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == 'mps':
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
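
# Usage sketch (added; illustrative only: the checkpoint names and the
# `custom_pipeline` identifier below are assumptions based on the diffusers
# community pipelines, not taken from this file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       'runwayml/stable-diffusion-v1-5',
#       custom_pipeline='speech_to_image_diffusion',
#       speech_model=WhisperForConditionalGeneration.from_pretrained('openai/whisper-small'),
#       speech_processor=WhisperProcessor.from_pretrained('openai/whisper-small'),
#   )
#   output = pipe(audio=audio_array, sampling_rate=16_000)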
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['#'])[0]

    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ['lm_head.decoder.bias', 'lm_head.bias']:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ['query.weight', 'query.bias']:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop('entity_predictions.decoder.weight')
    state_dict.pop('lm_head.decoder.weight')
    state_dict.pop('lm_head.decoder.bias')
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head') or key.startswith('entity_predictions')):
            state_dict_for_hugging_face['luke.' + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {'luke.embeddings.position_ids'}:
        raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}')
    if set(missing_keys) != {
        'lm_head.decoder.weight',
        'lm_head.decoder.bias',
        'entity_predictions.decoder.weight',
    }:
        raise ValueError(f'Unexpected missing_keys: {missing_keys}')

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == 'large':
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == 'large':
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>'))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert 'Japan' == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:')][0] == 'en:Japan'

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry['entities']:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
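
    # Example invocation (illustrative; the script filename and file paths are
    # assumptions):
    #   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path mluke/pytorch_model.bin --metadata_path mluke/metadata.json \
    #       --entity_vocab_path mluke/entity_vocab.tsv --pytorch_dump_folder_path ./mluke-base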
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        'negative_prompt',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False,
            set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3,
            down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'),
            up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'), latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_text2img(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):  # dtype default assumed to be float32
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):  # dtype default assumed to be float32
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
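
# Added for convenience (an assumption, not present in the original test module):
if __name__ == '__main__':
    unittest.main()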
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """
    Given a point (point_x, point_y) on the ellipse 4x^2 + y^2 = 100 and the
    gradient of the incoming beam, return the next reflection point and the
    gradient of the outgoing beam.
    """
    # gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    # sa and ca are sin(2*theta) and cos(2*theta) for the normal's angle theta,
    # obtained from the double-angle identities; reflecting the incoming line
    # across the normal gives the outgoing gradient.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
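    # With m = outgoing_gradient and c = point_y - m * point_x (the line's
    # intercept), substituting y = m*x + c into y^2 + 4x^2 = 100 gives
    #     (m^2 + 4) x^2 + (2*m*c) x + (c^2 - 100) = 0,
    # which is where the three coefficients below come from.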
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Count the reflections of the beam inside the ellipse 4x^2 + y^2 = 100
    before it exits through the gap at the top (-0.01 <= x <= 0.01, y > 0).
    """
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    # gradient of the segment from (0.0, 10.1) to the first strike point
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
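# Illustrative sanity check one can run: every point returned by next_point
# should still lie on the ellipse 4x^2 + y^2 = 100, e.g.
#     x, y, m = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
#     assert isclose(4 * x * x + y * y, 100)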
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """xmod"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=("en_XX",) , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Optional[Any] = use_cache
lowerCAmelCase : Union[str, Any] = classifier_dropout
lowerCAmelCase : int = pre_norm
lowerCAmelCase : Optional[Any] = adapter_reduction_factor
lowerCAmelCase : Any = adapter_layer_norm
lowerCAmelCase : Dict = adapter_reuse_layer_norm
lowerCAmelCase : Any = ln_before_adapter
lowerCAmelCase : Optional[Any] = list(snake_case__ )
lowerCAmelCase : List[Any] = default_language
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 646
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=32 , snake_case__=3 , snake_case__=4 , snake_case__=[10, 20, 30, 40] , snake_case__=[2, 2, 3, 2] , snake_case__=True , snake_case__=True , snake_case__=37 , snake_case__="gelu" , snake_case__=10 , snake_case__=0.0_2 , snake_case__=["stage2", "stage3", "stage4"] , snake_case__=[2, 3, 4] , snake_case__=None , ):
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Tuple = num_stages
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Any = depths
lowerCAmelCase : List[str] = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Optional[Any] = out_features
lowerCAmelCase : Union[str, Any] = out_indices
lowerCAmelCase : Tuple = scope
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = ConvNextVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
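        # e.g. with this tester's defaults (batch_size=13, hidden_sizes[-1]=40,
        # image_size=32) the expected shape is (13, 40, 1, 1)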
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = ConvNextVaForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = ConvNextVaBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(snake_case__ )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase : str = None
lowerCAmelCase : List[Any] = ConvNextVaBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[Any] = model(snake_case__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase ( self ):
lowerCAmelCase : str = self.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = config_and_inputs
lowerCAmelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase : Dict = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCamelCase : Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_lowerCamelCase : str = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : List[str] = False
_lowerCamelCase : List[Any] = False
_lowerCamelCase : str = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : int = False
def lowercase ( self ):
lowerCAmelCase : List[str] = ConvNextVaModelTester(self )
lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def lowercase ( self ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def lowercase ( self ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def lowercase ( self ):
pass
def lowercase ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase : List[str] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
continue
lowerCAmelCase : List[str] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(**snake_case__ ).loss
loss.backward()
def lowercase ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
continue
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase : str = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase : str = model(**snake_case__ ).loss
loss.backward()
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
lowerCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : str = [*signature.parameters.keys()]
lowerCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase ( self ):
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Tuple = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase ( self ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[str] = ConvNextVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def prepare_img():
"""simple docstring"""
lowerCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(snake_case__ )
lowerCAmelCase : Dict = self.default_image_processor
lowerCAmelCase : int = prepare_img()
lowerCAmelCase : Optional[Any] = preprocessor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Tuple = model(**snake_case__ )
# verify the logits
lowerCAmelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Optional[Any] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
| 715
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    """Return the indentation (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given indent level, optionally between two prompts."""
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap `key` so that the resulting sort ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
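# Illustrative example (hypothetical names): with the default key,
#     sort_objects(["load_file", "MAX_SIZE", "Loader", "_helper"])
# returns ["MAX_SIZE", "Loader", "_helper", "load_file"]: the constant first,
# then the class, then the functions sorted with underscores ignored
# ("helper" < "loadfile").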
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement`, with the objects between brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
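# Illustrative example (hypothetical statement): a one-line entry such as
#     _import_structure["models"].extend(["ZModel", "AConfig"])
# comes back as
#     _import_structure["models"].extend(["AConfig", "ZModel"])
# since both names are classes and sort alphabetically within their group.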
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init file, rewriting it unless `check_only` is True."""
    with open(file, 'r') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}.")
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` found under `PATH_TO_DIFFUSERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures += [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(F"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 646
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.0_2 , snake_case__=[1, 16, 4, 4] , snake_case__=None , ):
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : str = image_size
lowerCAmelCase : Dict = patch_size
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : List[str] = is_training
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Dict = scope
lowerCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase : Optional[int] = (self.image_size // 32) ** 2
lowerCAmelCase : Optional[Any] = num_patches + 1
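        # e.g. with the default image_size=64: (64 // 32) ** 2 = 4 patches and
        # seq_length = 4 + 1 = 5 once the [CLS] token is added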
def lowercase ( self ):
lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : str = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
lowerCAmelCase : Dict = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase_ , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = ViTHybridModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = self.type_sequence_label_size
lowerCAmelCase : Dict = ViTHybridForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase : int = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
_lowerCamelCase : int = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : int = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Optional[Any] = False
def lowercase ( self ):
lowerCAmelCase : str = ViTHybridModelTester(self )
lowerCAmelCase : int = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowercase ( self ):
pass
def lowercase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def lowercase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = model_class(lowercase_ )
lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : int = [*signature.parameters.keys()]
lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def lowercase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def lowercase ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Any = ViTHybridModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img():
"""simple docstring"""
lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowercase_ )
lowerCAmelCase : Union[str, Any] = self.default_image_processor
lowerCAmelCase : int = prepare_img()
lowerCAmelCase : List[Any] = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : List[str] = model(**lowercase_ )
# verify the logits
lowerCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase : List[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
@require_accelerate
def lowercase ( self ):
lowerCAmelCase : Any = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
lowerCAmelCase : str = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
lowerCAmelCase : Union[str, Any] = prepare_img()
lowerCAmelCase : List[str] = image_processor(images=lowercase_ , return_tensors='pt' )
lowerCAmelCase : Tuple = model(**lowercase_ )
lowerCAmelCase : str = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase : List[Any] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 716
|
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Tuple = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = embedding_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : int = scope
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , next_sentence_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[Any] = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : int = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Any = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : str = True
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : int = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
lowerCAmelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def lowercase ( self ):
lowerCAmelCase : List[Any] = MobileBertModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def _long_tensor(tok_lst):
    """Build a 2D long tensor on the current test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(snake_case__ )
lowerCAmelCase : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase : Tuple = model(snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=snake_case__ , )
        # MobileBERT outputs range in magnitude from about 1e0 to 1e8. Even a 0.0000001% difference with a value of 1e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
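        # For example, with TOLERANCE = 1e-3 a ratio of 0.9995 passes both bounds
        # while 1.002 fails the upper one, regardless of absolute magnitude.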
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 646
| 0
|
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find the path from the top-left to the bottom-right of `matrix` with the
    smallest sum, moving only right or down; the matrix is updated in place.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
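# Note: the update above is destructive; after a call, `matrix` holds cumulative
# path costs. Pass a copy (e.g. [row[:] for row in matrix]) to keep the original.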
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
"""simple docstring"""
lowerCAmelCase : Tuple = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
"""simple docstring"""
lowerCAmelCase : str = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
"""simple docstring"""
lowerCAmelCase : Optional[int] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def final():
"""simple docstring"""
lowerCAmelCase : List[Any] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
"""simple docstring"""
lowerCAmelCase : List[str] = 'imagenet-1k-id2label.json'
lowerCAmelCase : Tuple = 10_00
lowerCAmelCase : str = 'huggingface/label-files'
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase : List[str] = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
lowerCAmelCase : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCAmelCase : List[str] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCAmelCase : Tuple = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase : Any = [2, 2, 20]
lowerCAmelCase : List[str] = [3, 12, 16]
lowerCAmelCase : List[Any] = [1_92, 7_68, 10_24]
lowerCAmelCase : Union[str, Any] = CvtForImageClassification(_A )
lowerCAmelCase : str = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : List[Any] = torch.load(_A , map_location=torch.device('cpu' ) )
lowerCAmelCase : str = OrderedDict()
lowerCAmelCase : int = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCAmelCase : List[str] = list_of_state_dict + cls_token(_A )
lowerCAmelCase : Optional[Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
lowerCAmelCase : List[Any] = list_of_state_dict + attention(_A , _A )
lowerCAmelCase : List[str] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
lowerCAmelCase : Tuple = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
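# Example invocation (the script name and local paths below are hypothetical):
#   python convert_cvt.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth --pytorch_dump_folder_path ./cvt-w24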
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
    help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
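# `signature` canonicalizes a word by sorting its letters, so every anagram of a
# word shares the same key; `word_by_signature` (built below) indexes the whole
# word list by that key, making anagram lookup a single dict access.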
def signature(word: str ) -> str:
    """simple docstring"""
    return "".join(sorted(word ) )
def anagram(my_word: str ) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word )]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig ( PretrainedConfig ):
    model_type = """xlm-roberta"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
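# ONNX export configuration: declares which axes of each input are dynamic
# (batch and sequence, plus a choice axis for multiple-choice tasks) so the
# exported graph accepts variable-sized inputs.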
class XLMRobertaOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ):
if self.task == "multiple-choice":
lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
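# A processor pairs an image processor with a tokenizer behind a single __call__:
# text-only inputs go through the tokenizer alone, images produce pixel_values,
# and the text encoding is merged into the image encoding when both are given.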
class Blip2Processor ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_A , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer ):
    """simple docstring"""
    def fn(examples ):
        return tokenizer(examples['text'] )
    return fn
def get_serialized_examples(tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized_example = example.SerializeToString()
        records.append(serialized_example )
    return records
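# Each serialized record is a tf.train.Example wrapping two int64 feature lists
# (input_ids and attention_mask); the bytes from SerializeToString() are what the
# TFRecord writer in main() stores, one example per record.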
def main(args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
        print(F"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
args = parse_args()
main(args)
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')
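# Queue built from two stacks: put() pushes onto _stack1; get() drains _stack1 into
# _stack2 (reversing order) only when _stack2 is empty, so each element is moved at
# most once and both operations are amortized O(1).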
class Queue ( Generic[_T] ):
    def __init__( self , iterable: Iterable[_T] | None = None ):
        self._stack1: list[_T] = list(iterable or [] )
        self._stack2: list[_T] = []
    def __len__( self ):
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__( self ):
        return f"Queue({tuple(self._stack2[::-1] + self._stack1 )})"
    def put( self , item ):
        self._stack1.append(item )
    def get( self ):
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError('Queue is empty' )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
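# The original checkpoint stores weight-norm parameters as weight_g/weight_v pairs,
# so apply_weight_norm() is called before copying (to create those parameters on the
# HF model) and remove_weight_norm() afterwards to fold them back into plain weights.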
def load_weights(checkpoint , hf_model , config ):
    """simple docstring"""
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
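    # The tests below sweep individual scheduler options (timesteps, betas, schedule,
    # variance type, clipping, thresholding, prediction type) through check_over_configs,
    # then run full denoising loops and compare against reference sums/means.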
def lowercase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def lowercase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def lowercase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case_ )
def lowercase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case_ )
def lowercase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case_ )
def lowercase ( self ):
self.check_over_configs(thresholding=snake_case_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , )
def lowercase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def lowercase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case_ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**snake_case_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def lowercase ( self ):
lowerCAmelCase : int = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Optional[int] = scheduler_class(**snake_case_ )
lowerCAmelCase : Any = len(snake_case_ )
lowerCAmelCase : str = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(snake_case_ ) ):
# 1. predict noise residual
lowerCAmelCase : List[str] = model(snake_case_ , snake_case_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Dict = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : int = pred_prev_sample
lowerCAmelCase : Any = torch.sum(torch.abs(snake_case_ ) )
lowerCAmelCase : str = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case_ )
lowerCAmelCase : str = len(snake_case_ )
lowerCAmelCase : Tuple = self.dummy_model()
lowerCAmelCase : Dict = self.dummy_sample_deter
lowerCAmelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(snake_case_ ) ):
# 1. predict noise residual
lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Dict = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : int = pred_prev_sample
lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case_ ) )
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def lowercase ( self ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
lowerCAmelCase : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
lowerCAmelCase : Tuple = scheduler.timesteps
for i, timestep in enumerate(snake_case_ ):
if i == len(snake_case_ ) - 1:
lowerCAmelCase : int = -1
else:
lowerCAmelCase : Optional[Any] = timesteps[i + 1]
lowerCAmelCase : str = scheduler.previous_timestep(snake_case_ )
lowerCAmelCase : List[Any] = prev_t.item()
self.assertEqual(snake_case_ , snake_case_ )
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute( self , predictions , references , char_order = CHRF.CHAR_ORDER , word_order = CHRF.WORD_ORDER , beta = CHRF.BETA , lowercase = False , whitespace = False , eps_smoothing = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
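# Patience sort: deal each element onto the leftmost pile (Stack) whose top is not
# smaller than it, locating that pile with bisect_left over piles ordered by their
# top element; a heap-based k-way merge of the reversed piles yields sorted output.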
@total_ordering
class Stack ( list ):
    def __lt__( self , other ):
        return self[-1] < other[-1]
    def __eq__( self , other ):
        return self[-1] == other[-1]
def patience_sort(collection: list ) -> list:
    """simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
user_input = input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig ( PretrainedConfig ):
    model_type = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
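    # For illustration, a value accepted by the validation below:
    #   rope_scaling={'type': 'linear', 'factor': 2.0}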
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
import math
def malus_law(initial_intensity: float , angle: float ) -> float:
    """simple docstring"""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative' )
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
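# Example: malus_law(100.0, 60) is ~25.0, since cos(60 degrees) ** 2 == 0.25.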
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig ( PretrainedConfig ):
    model_type = """deformable_detr"""
    attribute_map = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
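    # attribute_map above aliases the generic `hidden_size` / `num_attention_heads`
    # names onto this config's DETR-style fields (`d_model`, `encoder_attention_heads`).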
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0_2 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.2_5 , snake_case__=False , **snake_case__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = backbone_config.get('model_type' )
lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[Any] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Union[str, Any] = use_timm_backbone
lowerCAmelCase : List[Any] = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : int = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : List[str] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : str = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Any = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = auxiliary_loss
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : List[str] = num_feature_levels
lowerCAmelCase : List[str] = encoder_n_points
lowerCAmelCase : Union[str, Any] = decoder_n_points
lowerCAmelCase : Tuple = two_stage
lowerCAmelCase : Dict = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase : Union[str, Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Dict = mask_loss_coefficient
lowerCAmelCase : Any = dice_loss_coefficient
lowerCAmelCase : str = bbox_loss_coefficient
lowerCAmelCase : Tuple = giou_loss_coefficient
lowerCAmelCase : List[str] = eos_coefficient
lowerCAmelCase : Any = focal_alpha
lowerCAmelCase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
'''simple docstring'''
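# Min-heap of Node objects keyed on Node.val; idx_of_element maps each node to its
# index in the heap array so decrease_key can find and re-sift it in O(log n).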
class Node :
    def __init__( self , name , val ):
        self.name = name
        self.val = val
    def __str__( self ):
        return f"{self.__class__.__name__}({self.name}, {self.val})"
    def __lt__( self , other ):
        return self.val < other.val
class MinHeap :
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self , key ):
        return self.get_value(key )
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ):
        return idx * 2 + 1
    def get_right_child_idx( self , idx ):
        return idx * 2 + 2
    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ):
        return self.heap[0]
    def remove( self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ):
        return len(self.heap ) == 0
    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : Dict = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : List[str] = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def lowercase ( self ):
lowerCAmelCase : List[str] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Dict = logging.get_logger(__name__)
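# The conversion first builds a DetaConfig mirroring the original DETA-Swin setup,
# then maps every original checkpoint key to its HF name via the
# (source, destination) rename pairs collected below.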
def get_deta_config(model_name ):
    """simple docstring"""
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct: dict , old: str , new: str ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ) -> None:
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
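# Illustrative aside (not part of the original script): the slicing above works
# because the fused qkv projection has shape (3 * dim, dim) and stacks query,
# key and value row-wise. A minimal sketch with an assumed dim of 4:
#
#   fused = torch.randn(3 * 4, 4)
#   q, k, v = fused[:4, :], fused[4:8, :], fused[-4:, :]
#   assert q.shape == k.shape == v.shape == (4, 4)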
def read_in_decoder_q_k_v(state_dict , config ) -> None:
    """simple docstring"""
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name: str , pytorch_dump_folder_path: str , push_to_hub: bool ) -> None:
    """simple docstring"""
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
    else:
        raise ValueError(F"Model name {model_name} not supported" )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('transformer.decoder' , 'model.decoder' )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict['model.' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('transformer' , 'model' )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='coco_detection' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device ) )
    # verify logits
    print('Logits:' , outputs.logits[0, :3, :3] )
    print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
        expected_boxes = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
        expected_boxes = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1e-4 )
    print('Everything ok!' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...' )
        model.push_to_hub(F"jozhang97/{model_name}" )
        processor.push_to_hub(F"jozhang97/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
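# Hedged usage note: assuming this script is saved as convert_deta_swin_to_pytorch.py
# (the filename is an assumption, not stated here), a typical invocation would be:
#
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large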
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray , variance: float ) -> np.ndarray:
    """simple docstring"""
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice(img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
    """simple docstring"""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int , spatial_variance: float ) -> np.ndarray:
    """simple docstring"""
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
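# Illustrative example (hand-computed, for orientation only): get_gauss_kernel(3, 1.0)
# evaluates the gaussian at the centre distances
#   [[sqrt(2), 1, sqrt(2)],
#    [1,       0, 1      ],
#    [sqrt(2), 1, sqrt(2)]]
# so the centre weight is 1 / sqrt(2 * pi) ~= 0.3989 and the corners are smaller.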
def bilateral_filter(img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
    """simple docstring"""
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args(args: list ) -> tuple:
    """simple docstring"""
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename , spatial_variance , intensity_variance , kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('input image', img)
    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('output image', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
'''simple docstring'''
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCAmelCase ( PipelineTool ):
    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["""text"""]
    outputs = ["""audio"""]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='pt' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector'] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
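# Hedged usage sketch (assumption, not from the original file): if this tool were
# exposed under a name such as TextToSpeechTool, a call would look roughly like:
#
#   tool = TextToSpeechTool()
#   audio = tool("Hello, world")   # PipelineTool.__call__ chains encode/forward/decode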
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase ( Trainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader( self , calib_dataset=None ):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires an calib_dataset.' )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset , description='Calibration' )
        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=True , )
    def calibrate( self , calib_dataset=None ):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info('***** Running calibration *****' )
        logger.info(f" Num examples = {self.calib_num}" )
        logger.info(f" Batch size = {calib_dataloader.batch_size}" )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss , logits , labels = self.prediction_step(model , inputs , prediction_loss_only=True )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix: str = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"{metric_key_prefix}_" ):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"{metric_key_prefix}_" ):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
    def save_onnx( self , output_dir="./" ):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        batch = next(iter(eval_dataloader ) )
        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
        # convert to tuple
        input_tuple = tuple(v.to(device ) for k, v in batch.items() )
        logger.info('Converting model to be onnx compatible' )
        from pytorch_quantization.nn import TensorQuantizer
        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device )
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model , 'module' ) else model
        quant_trainer.configure_model(model_to_save , self.quant_trainer_args )
        output_model_file = os.path.join(output_dir , 'model.onnx' )
        logger.info(f"exporting model to {output_model_file}" )
        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save , input_tuple , output_model_file , export_params=True , opset_version=13 , do_constant_folding=True , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            } , verbose=True , )
        logger.info('onnx export finished' )
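# Hedged follow-up sketch (assumption, not part of the original trainer): the
# exported model.onnx can be sanity-checked with onnxruntime, e.g.
#
#   import onnxruntime as ort
#   session = ort.InferenceSession("model.onnx")
#   print([inp.name for inp in session.get_inputs()])   # input_ids, attention_mask, token_type_ids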
'''simple docstring'''
from typing import Any
class Node:
    def __init__( self , data ):
        self.data = data
        self.next = None
    def __repr__( self ):
        return f"Node({self.data})"
class LinkedList:
    def __init__( self ):
        self.head = None
    def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        print(self )
    def delete_head( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        return self.head is None
    def reverse( self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        1_00,
        Node(77_34_51_12 ),
        'dlrow olleH',
        7,
        55_55,
        0,
        -1_92.5_55_55,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(F"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(F"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int] ) -> list[int]:
    """simple docstring"""
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
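# Quick illustrative check: pigeon_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5];
# holes_range is max - min + 1 = 6, so the sort runs in O(n + range) time.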
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int ) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(10_00 , 1_00_00_00 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
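# Worked example: 4150 = 4**5 + 1**5 + 5**5 + 0**5 (1024 + 1 + 3125 + 0), so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().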
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
'''simple docstring'''
def is_palindrome(head ) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
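# Minimal usage sketch (the ListNode class below is an assumption; this file does
# not define a node type of its own):
#
#   class ListNode:
#       def __init__(self, val):
#           self.val, self.next = val, None
#
#   head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(1)
#   assert is_palindrome_dict(head)   # 1 -> 2 -> 1 reads the same both ways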
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase ( unittest.TestCase ):
    def test_set_level( self ):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration( self ):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart' )
        msg = """Testing 1, 2, 3"""
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + '\n' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , '' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + '\n' )
        # restore to the original level
        logging.set_verbosity(level_origin )
    @mockenv(TRANSFORMERS_VERBOSITY='error' )
    def test_env_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger('transformers.models.bart.tokenization_bart' )
        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def test_env_invalid_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart' )
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings( self ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart' )
        msg = """Testing 1, 2, 3"""
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , '' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + '\n' )
def test_set_progress_bar_enabled() -> None:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
'''simple docstring'''
import math
def solution(n: int = 1_00 ) -> int:
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
def split(string: str , separator: str = " " ) -> list:
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
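# Illustrative example: split('apple#banana#cherry', '#') == ['apple', 'banana', 'cherry'].
# Note that, unlike str.split, a trailing separator yields no empty final element here.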
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id( self ):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 2000 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
    def test_full_tokenizer( self ):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        # fmt: off
        self.assertListEqual(
            tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode( self ):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration( self ):
        sequences = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        expected_encoding = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='AI-Sweden/gpt-sw3-126m' , sequences=sequences , )
'''simple docstring'''
from math import factorial
def solution(num: int = 1_00 ) -> int:
    """simple docstring"""
    return sum(map(int , str(factorial(num ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
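# Worked example: factorial(10) = 3628800, so solution(10) == 3+6+2+8+8+0+0 == 27.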
'''simple docstring'''
def is_even(number: int ) -> bool:
    """simple docstring"""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
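# Illustrative examples: is_even(4) is True and is_even(7) is False, since the
# lowest bit of any odd number is 1.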
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=224 , snake_case__=1000 , snake_case__=[3, 3, 6, 4] , snake_case__=[48, 56, 112, 220] , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[str] = is_training
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : List[Any] = layer_depths
lowerCAmelCase : List[str] = embed_dims
def lowercase ( self ):
lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : int = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
        lowerCAmelCase : Union[str, Any] = SwiftFormerModel(config=snake_case__ )
        model.to(torch_device )
        model.eval()
        lowerCAmelCase : int = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
        lowerCAmelCase : Any = self.num_labels
        lowerCAmelCase : str = SwiftFormerForImageClassification(snake_case__ )
        model.to(torch_device )
        model.eval()
        lowerCAmelCase : List[str] = model(snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        lowerCAmelCase : Any = SwiftFormerForImageClassification(snake_case__ )
        model.to(torch_device )
        model.eval()
        lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase : int = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self ):
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) : str = self.prepare_config_and_inputs()
lowerCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCamelCase : Dict = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_lowerCamelCase : Any = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : List[str] = False
_lowerCamelCase : str = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : str = False
def lowercase ( self ):
lowerCAmelCase : List[str] = SwiftFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowercase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def lowercase ( self ):
pass
def lowercase ( self ):
lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            lowerCAmelCase : int = model_class(config )
            lowerCAmelCase : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowercase ( self ):
lowerCAmelCase , lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            lowerCAmelCase : Union[str, Any] = model_class(config )
lowerCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase : Optional[Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowercase ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase : Optional[Any] = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def lowercase ( self ):
pass
def lowercase ( self ):
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
            lowerCAmelCase : int = model_class(snake_case__ )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            lowerCAmelCase : int = outputs.hidden_states
            lowerCAmelCase : Dict = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
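            # Sanity check of the schedule above (illustrative): with image_size=224 the eight
            # feature maps have spatial sizes 56, 56, 28, 28, 14, 14, 7, 7, i.e. 224 // 4,
            # then halved after every two stages.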
lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[Any] = True
            check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[str] = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowercase ( self ):
def _config_zero_init(snake_case__ ):
            lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    lowerCAmelCase : Optional[int] = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase : Union[str, Any] = _config_zero_init(config )
        for model_class in self.all_model_classes:
            lowerCAmelCase : str = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self ):
pass
def prepare_img ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self ):
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def lowercase ( self ):
        lowerCAmelCase : List[str] = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(torch_device )
        lowerCAmelCase : Optional[Any] = self.default_image_processor
        lowerCAmelCase : List[Any] = prepare_img()
        lowerCAmelCase : Any = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase : Dict = model(**inputs )
        # verify the logits
        lowerCAmelCase : Union[str, Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        lowerCAmelCase : Optional[Any] = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 711
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __UpperCamelCase ( _A : str , _A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    lowerCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __UpperCamelCase ( _A : str , _A : DatasetInfo ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : str = str(_A )
dataset_info.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfo.from_directory(_A )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_A , 'dataset_info.json' ) )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : Tuple = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
lowerCAmelCase : Optional[int] = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    lowerCAmelCase : Any = yaml.safe_dump(dataset_info_yaml_dict )
    lowerCAmelCase : int = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = DatasetInfo()
lowerCAmelCase : List[Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __UpperCamelCase ( _A : Tuple , _A : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Tuple = str(_A )
dataset_infos_dict.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfosDict.from_directory(_A )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCAmelCase : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCAmelCase : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_A , 'README.md' ) )
| 646
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
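# Usage note (illustrative): with the lazy structure above, importing the package module is
# cheap; a heavy submodule is only materialized on first attribute access, e.g.
#   from transformers.models.longformer import LongformerModel  # triggers modeling_longformer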
| 712
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( a ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=snake_case__ , speech_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , )
def lowercase ( self , snake_case__ = "auto" ):
if slice_size == "auto":
lowerCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def lowercase ( self ):
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__=1_6000 , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , **snake_case__ , ):
lowerCAmelCase : List[str] = self.speech_processor.feature_extractor(
snake_case__ , return_tensors='pt' , sampling_rate=snake_case__ ).input_features.to(self.device )
lowerCAmelCase : Optional[Any] = self.speech_model.generate(snake_case__ , max_length=48_0000 )
lowerCAmelCase : str = self.speech_processor.tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , normalize=snake_case__ )[
0
]
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = 1
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = len(snake_case__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case__ )}." )
# get prompt text embeddings
lowerCAmelCase : str = self.tokenizer(
snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCAmelCase : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = text_embeddings.shape
lowerCAmelCase : Any = text_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase : List[str] = guidance_scale > 1.0
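        # Concretely, the combined prediction computed later in the denoising loop is
        #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
        # so guidance_scale <= 1 skips the unconditional pass entirely.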
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase : List[str]
if negative_prompt is None:
lowerCAmelCase : Any = [''] * batch_size
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !="
f" {type(snake_case__ )}." )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
lowerCAmelCase : Dict = negative_prompt
lowerCAmelCase : Optional[int] = text_input_ids.shape[-1]
lowerCAmelCase : int = self.tokenizer(
snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , )
lowerCAmelCase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : List[Any] = uncond_embeddings.shape[1]
lowerCAmelCase : List[str] = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase : str = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase : Tuple = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Union[str, Any] = {}
if accepts_eta:
lowerCAmelCase : int = eta
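        # e.g. DDIMScheduler accepts `eta`, so the extra kwargs become {'eta': eta} here, while
        # schedulers whose `step` signature has no `eta` argument receive no extra kwargs.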
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : Tuple = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : List[str] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase : Dict = noise_pred.chunk(2 )
lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : int = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase : Dict = self.vae.decode(snake_case__ ).sample
lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
| 646
| 0
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Dict = logging.get_logger(__name__)
def get_mobilenet_va_config ( _A : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
    lowerCAmelCase : Any = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , _A )
if matches:
lowerCAmelCase : List[Any] = float(matches[1] )
lowerCAmelCase : Tuple = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowerCAmelCase : List[str] = 10_01
lowerCAmelCase : Union[str, Any] = '''imagenet-1k-id2label.json'''
lowerCAmelCase : Optional[int] = '''huggingface/label-files'''
    lowerCAmelCase : Tuple = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    lowerCAmelCase : Any = {int(k ) + 1: v for k, v in idalabel.items()}
lowerCAmelCase : int = '''background'''
lowerCAmelCase : str = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
return config
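# e.g. "mobilenet_v1_0.75_192" yields depth_multiplier=0.75, image_size=192 and 1001 labels
# (the ImageNet-1k mapping shifted by one to make room for the extra "background" class).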
def prepare_img ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase : Optional[Any] = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_movilevit_checkpoint ( _A : Optional[int] , _A : int , _A : str , _A : Tuple=False ) -> List[Any]:
"""simple docstring"""
    lowerCAmelCase : int = get_mobilenet_va_config(model_name )
# Load 🤗 model
    lowerCAmelCase : Any = MobileNetVaForImageClassification(config ).eval()
# Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowerCAmelCase : Tuple = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
lowerCAmelCase : Tuple = image_processor(images=prepare_img() , return_tensors='pt' )
    lowerCAmelCase : Optional[int] = model(**encoding )
lowerCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
lowerCAmelCase : Optional[int] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
lowerCAmelCase : str = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
lowerCAmelCase : Optional[Any] = None
if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('Pushing to the hub...' )
lowerCAmelCase : Union[str, Any] = '''google/''' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
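# Example invocation (script name and paths illustrative):
#   python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./dump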
| 713
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[Any] = LDMTextToImagePipeline
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : str = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : str = CLIPTextModel(snake_case__ )
lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[Any] = self.get_dummy_components()
lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
lowerCAmelCase : List[Any] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Tuple = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images[0]
lowerCAmelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 646
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """xmod"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=("en_XX",) , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Optional[Any] = use_cache
lowerCAmelCase : Union[str, Any] = classifier_dropout
lowerCAmelCase : int = pre_norm
lowerCAmelCase : Optional[Any] = adapter_reduction_factor
lowerCAmelCase : Any = adapter_layer_norm
lowerCAmelCase : Dict = adapter_reuse_layer_norm
lowerCAmelCase : Any = ln_before_adapter
lowerCAmelCase : Optional[Any] = list(snake_case__ )
lowerCAmelCase : List[Any] = default_language
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
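# Illustrative: for the default task the ONNX config above exposes dynamic axes
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'})])
# so batch size and sequence length remain symbolic at export time.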
| 646
| 0
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
_lowerCAmelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name ( _A : str ) -> Dict:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCAmelCase : Dict = model_type_to_module_name(UpperCAmelCase__ )
lowerCAmelCase : Optional[Any] = importlib.import_module(F".{module_name}" , 'transformers.models' )
try:
return getattr(UpperCAmelCase__ , UpperCAmelCase__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCAmelCase__ , '__name__' , UpperCAmelCase__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCAmelCase : Union[str, Any] = importlib.import_module('transformers' )
if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ):
return getattr(UpperCAmelCase__ , UpperCAmelCase__ )
return None
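# e.g. resolving "CLIPImageProcessor" imports transformers.models.clip on demand; names that
# only exist as dummies (say, when vision dependencies are missing) are looked up on the
# top-level transformers module instead, so the caller gets a meaningful error message.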
def get_image_processor_config ( _A : Union[str, os.PathLike] , _A : Optional[Union[str, os.PathLike]] = None , _A : bool = False , _A : bool = False , _A : Optional[Dict[str, str]] = None , _A : Optional[Union[bool, str]] = None , _A : Optional[str] = None , _A : bool = False , **_A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = get_file_from_repo(
UpperCAmelCase__ , UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , resume_download=UpperCAmelCase__ , proxies=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , revision=UpperCAmelCase__ , local_files_only=UpperCAmelCase__ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(UpperCAmelCase__ , encoding='utf-8' ) as reader:
return json.load(UpperCAmelCase__ )
class lowerCAmelCase :
def __init__( self ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(__UpperCamelCase )
def lowercase ( cls , snake_case__ , **snake_case__ ):
lowerCAmelCase : int = kwargs.pop('config' , __UpperCamelCase )
lowerCAmelCase : Any = kwargs.pop('trust_remote_code' , __UpperCamelCase )
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase , lowerCAmelCase : List[Any] = ImageProcessingMixin.get_image_processor_dict(__UpperCamelCase , **__UpperCamelCase )
lowerCAmelCase : Dict = config_dict.get('image_processor_type' , __UpperCamelCase )
lowerCAmelCase : List[Any] = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
lowerCAmelCase : Optional[int] = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowerCAmelCase : Optional[int] = config_dict.pop('feature_extractor_type' , __UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
lowerCAmelCase : Dict = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
lowerCAmelCase : Optional[int] = config_dict['auto_map']['AutoFeatureExtractor']
lowerCAmelCase : Dict = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase : Tuple = AutoConfig.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
# It could be in `config.image_processor_type``
lowerCAmelCase : Union[str, Any] = getattr(__UpperCamelCase , 'image_processor_type' , __UpperCamelCase )
if hasattr(__UpperCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
lowerCAmelCase : Tuple = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
lowerCAmelCase : Optional[Any] = image_processor_class_from_name(__UpperCamelCase )
lowerCAmelCase : int = image_processor_auto_map is not None
lowerCAmelCase : List[str] = image_processor_class is not None or type(__UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
lowerCAmelCase : Union[str, Any] = resolve_trust_remote_code(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if has_remote_code and trust_remote_code:
lowerCAmelCase : Union[str, Any] = get_class_from_dynamic_module(
__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
lowerCAmelCase : Optional[Any] = kwargs.pop('code_revision' , __UpperCamelCase )
if os.path.isdir(__UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__UpperCamelCase , **__UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__UpperCamelCase , **__UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
lowerCAmelCase : str = IMAGE_PROCESSOR_MAPPING[type(__UpperCamelCase )]
return image_processor_class.from_dict(__UpperCamelCase , **__UpperCamelCase )
raise ValueError(
f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def lowercase ( snake_case__ , snake_case__ ):
IMAGE_PROCESSOR_MAPPING.register(__UpperCamelCase , __UpperCamelCase )
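# Typical usage of the auto class defined above (model id illustrative):
#   processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
#   inputs = processor(images=image, return_tensors='pt')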
| 715
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent ( _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Any = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(current_block ) )
                if index < len(lines ) - 1:
                    lowerCAmelCase : List[Any] = [lines[index + 1]]
                    index += 1
                else:
                    lowerCAmelCase : int = []
            else:
                blocks.append('\n'.join(current_block ) )
                lowerCAmelCase : Any = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('\n'.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('\n'.join(lines[index:] ) )
    return blocks
def ignore_underscore ( _A : Dict ) -> List[Any]:
"""simple docstring"""
def _inner(_A : Tuple ):
return key(_A ).lower().replace('_' , '' )
return _inner
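# e.g. ignore_underscore(lambda s: s)("_LazyModule") == "lazymodule", so leading and internal
# underscores do not affect the alphabetical ordering used by the sorter below.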
def sort_objects ( _A : Union[str, Any] , _A : Any=None ) -> Optional[Any]:
"""simple docstring"""
    def noop(_A : Any ):
        return _A
if key is None:
lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
    lowerCAmelCase : str = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    lowerCAmelCase : List[str] = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(obj )[0].isupper()]
    lowerCAmelCase : Tuple = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
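# e.g. sort_objects(["my_func", "MyClass", "CONSTANT"]) == ["CONSTANT", "MyClass", "my_func"]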
def sort_objects_in_import ( _A : Union[str, Any] ) -> int:
"""simple docstring"""
    def _replace(match : List[Any] ):
lowerCAmelCase : List[Any] = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase : Dict = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_A )] ) + "]"
lowerCAmelCase : Optional[int] = import_statement.split('\n' )
    if len(lines ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
        lowerCAmelCase : List[str] = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCAmelCase : Optional[Any] = sort_objects(keys , key=lambda x : x[1] )
lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : int = keys[:-1]
lowerCAmelCase : Tuple = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports of a given init file; with `check_only=True`, only report."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` of the source tree."""
    # PATH_TO_TRANSFORMERS is assumed to be defined near the top of this utility,
    # alongside `get_indent` and the `_re_*` regexes used above.
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
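# Note: this utility relies on helpers defined near the top of the file and not
# shown in this excerpt (`get_indent`, the header of `split_code_in_indented_blocks`
# and the `_re_*` regexes). Plausible definitions, for reference only -- the exact
# patterns in the original file may differ:
#
#     _re_indent = re.compile(r"^(\s*)\S")
#     _re_direct_key = re.compile(r'^\s*"([^"]+)":')
#     _re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
#     _re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
#     _re_bracket_content = re.compile(r"\[([^\]]+)\]")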
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a LoRA .safetensors checkpoint into a Stable Diffusion pipeline."""
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
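# The update above is the standard LoRA merge: for a pair of low-rank factors
# `up` ([out, r]) and `down` ([r, in]), the full-rank delta is their product,
# scaled by `alpha`. A self-contained sketch of the same arithmetic with
# made-up shapes (illustrative only, not part of the conversion script):
def _example_lora_merge():
    up = torch.randn(16, 4)  # lora_up.weight, rank r = 4
    down = torch.randn(4, 32)  # lora_down.weight
    base = torch.zeros(16, 32)  # frozen base weight W0
    merged = base + 0.75 * torch.mm(up, down)  # W = W0 + alpha * deltaW, alpha = 0.75
    return merged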
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
    parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
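# Self-contained illustration of the ratio-based comparison used above, with
# made-up values: an additive tolerance cannot serve both magnitudes at once,
# while the ratio check treats them uniformly.
def _example_ratio_tolerance():
    expected = torch.tensor([1.0e8, 2.0])
    result = torch.tensor([1.00001e8, 2.000001])
    ratio = expected / result
    return bool(torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3))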
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
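# Illustrative usage sketch; the checkpoint name is an assumption, not taken
# from this file:
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#     inputs = processor(images=image, text="A picture of", return_tensors="pt")
#     # -> pixel_values (flattened patches) plus decoder_input_ids/decoder_attention_mask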
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return the (new_key, old_key) rename pairs for the patch embeddings of stage `idx`."""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Return the (new_key, old_key) rename pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Return the (new_key, old_key) rename pair for the cls token of stage `idx`."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
return token
def final():
    """Return the (new_key, old_key) rename pairs for the final layernorm and classification head."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Fetch the original CvT weights and convert them to the Hugging Face format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
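# The conversion above is a key-renaming pattern: each helper returns
# (new_key, old_key) pairs and the main loop copies the original tensors under
# the new names. A standalone sketch of the same pattern, with hypothetical keys:
#
#     from collections import OrderedDict
#     pairs = [("cvt.encoder.stages.0.embedding.projection.weight",
#               "stage0.patch_embed.proj.weight")]
#     new_state_dict = OrderedDict((new, old_state_dict[old]) for new, old in pairs)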
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
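# The `attribute_map` above lets generic code use the canonical config names;
# an illustrative check (not part of the original file):
#
#     config = OpenAIGPTConfig()
#     assert config.hidden_size == config.n_embd  # "hidden_size" is mapped to "n_embd"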
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
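# The `inputs` property feeds the ONNX exporter the dynamic axes for each named
# input. Illustrative usage (constructor/classmethod details are assumptions):
#
#     onnx_config = XLMRobertaOnnxConfig.from_model_config(XLMRobertaConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])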
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
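# DPR retrieval scores a question against passages with a dot product of the
# pooled embeddings produced by the two encoders above; schematically
# (illustrative shapes, not part of the original test):
#
#     q = question_encoder(question_ids)[0]       # (1, 768)
#     p = context_encoder(passage_ids)[0]         # (n_passages, 768)
#     scores = tf.matmul(q, p, transpose_b=True)  # (1, n_passages)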
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """Parse the command-line arguments for the TFRecord preparation script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a callable that tokenizes the `text` column with the given tokenizer."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` records."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
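# Shards written above can be read back with tf.data; a minimal sketch, assuming
# the default --max_length of 512 and default --output_dir used at write time:
#
#     feature_spec = {
#         "input_ids": tf.io.FixedLenFeature([512], tf.int64),
#         "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
#     }
#     ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
#     ds = ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))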
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize a PIL image to a multiple of 32 and normalize it to [-1, 1] as a torch tensor."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None, output_type="pil", return_dict=True):
if isinstance(snake_case__ , PIL.Image.Image ):
lowerCAmelCase : List[Any] = 1
elif isinstance(snake_case__ , torch.Tensor ):
lowerCAmelCase : Any = image.shape[0]
else:
raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case__ )}" )
if isinstance(snake_case__ , PIL.Image.Image ):
lowerCAmelCase : Any = preprocess(snake_case__ )
lowerCAmelCase , lowerCAmelCase : int = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCAmelCase : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCAmelCase : str = next(self.unet.parameters() ).dtype
lowerCAmelCase : Union[str, Any] = randn_tensor(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
lowerCAmelCase : Any = image.to(device=self.device , dtype=snake_case__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case__ , device=self.device )
lowerCAmelCase : Tuple = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : List[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Tuple = {}
if accepts_eta:
lowerCAmelCase : List[Any] = eta
for t in self.progress_bar(snake_case__ ):
# concat latents and low resolution image in the channel dimension.
lowerCAmelCase : Optional[Any] = torch.cat([latents, image] , dim=1 )
lowerCAmelCase : Optional[int] = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : Optional[int] = self.unet(snake_case__ , snake_case__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : Union[str, Any] = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# decode the image latents with the VQVAE
lowerCAmelCase : str = self.vqvae.decode(snake_case__ ).sample
lowerCAmelCase : Optional[Any] = torch.clamp(snake_case__ , -1.0 , 1.0 )
lowerCAmelCase : int = image / 2 + 0.5
lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
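# Minimal usage sketch for the pipeline above (the checkpoint id and file names are
# illustrative assumptions, not guaranteed; any compatible VQVAE/UNet/scheduler
# checkpoint such as "CompVis/ldm-super-resolution-4x-openimages" should work):
#
#   pipe = lowerCAmelCase.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")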
| 720
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    """Copy the original generator weights into the SpeechTaHifiGan model."""
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
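# Note: `apply_weight_norm` re-parameterizes each convolution weight into the pair
# (weight_g, weight_v), matching how the original checkpoint stores its tensors, so the
# copies above can be made one-to-one; `remove_weight_norm` then folds the pair back
# into a single `weight` tensor for plain inference.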
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original HiFi-GAN vocoder checkpoint to the transformers format."""
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
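# Example invocation (hypothetical paths and script name; per the argparse setup above,
# only --config_path and --push_to_hub are optional):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan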
| 646
| 0
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
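# Worked example: with arrival_time=[0, 2] and burst_time=[4, 1], process 2 preempts
# process 1 at t=2 and finishes at t=3 (waiting time 3 - 2 - 1 = 0), after which
# process 1 runs to completion at t=5 (waiting time 5 - 0 - 4 = 1), so the function
# returns [1, 0].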
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print('Average turn around time =', total_turn_around_time / no_of_processes)
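# Minimal non-interactive sketch of the three functions above (the process set is made
# up for illustration; the interactive __main__ block below is unchanged):
def _demo_shortest_remaining_time_first() -> None:
    arrival = [0, 1, 2]
    burst = [5, 3, 1]
    wt = calculate_waitingtime(arrival, burst, 3)
    tat = calculate_turnaroundtime(burst, 3, wt)
    calculate_average_times(wt, tat, 3)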
if __name__ == "__main__":
    print('Enter how many processes you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )
    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            'score': output.score,
            'char_order': output.char_order,
            'word_order': output.word_order,
            'beta': output.beta,
        }
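# Reference-layout sketch: this metric takes references grouped per prediction, e.g.
# references=[["ref a1", "ref a2"], ["ref b1", "ref b2"]], and transposes them into the
# per-position streams sacrebleu expects: [["ref a1", "ref b1"], ["ref a2", "ref b2"]].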
| 646
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase ( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs')
@classmethod
    def setUpClass(cls):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass(cls):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase ( self ):
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=snake_case__ ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(snake_case__ ), self.test_file_path] , env=os.environ.copy() )
def lowercase ( self ):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class lowerCAmelCase ( unittest.TestCase ):
_lowerCamelCase : int = """test-tpu"""
_lowerCamelCase : Tuple = """us-central1-a"""
_lowerCamelCase : Optional[int] = """ls"""
_lowerCamelCase : Union[str, Any] = ["""accelerate""", """tpu-config"""]
_lowerCamelCase : Optional[int] = """cd /usr/share"""
_lowerCamelCase : List[str] = """tests/test_samples/test_command_file.sh"""
_lowerCamelCase : Any = """Running gcloud compute tpus tpu-vm ssh"""
def lowercase ( self ):
lowerCAmelCase : Optional[int] = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : Tuple = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : int = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=snake_case__ )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : Tuple = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : Dict = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : List[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : List[Any] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , snake_case__ , )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , snake_case__ , )
| 700
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase ( PretrainedConfig ):
_lowerCamelCase : Union[str, Any] = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 646
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=4 , ):
lowerCAmelCase : List[str] = parent
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : Optional[int] = use_attention_mask
lowerCAmelCase : Any = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : int = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : Optional[int] = max_position_embeddings
lowerCAmelCase : List[Any] = type_vocab_size
lowerCAmelCase : List[Any] = type_sequence_label_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Dict = num_choices
def lowercase ( self ):
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : str = None
if self.use_attention_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Union[str, Any] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
_lowerCamelCase : Union[str, Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def lowercase ( self ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Any = model_class_name.from_pretrained('albert-base-v2' )
lowerCAmelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowerCAmelCase : Union[str, Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ )[0]
lowerCAmelCase : Optional[Any] = (1, 11, 768)
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Any = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
| 701
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCAmelCase ( PretrainedConfig ):
_lowerCamelCase : Any = """deformable_detr"""
_lowerCamelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0_2 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.2_5 , snake_case__=False , **snake_case__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = backbone_config.get('model_type' )
lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[Any] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Union[str, Any] = use_timm_backbone
lowerCAmelCase : List[Any] = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : int = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : List[str] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : str = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Any = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = auxiliary_loss
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : List[str] = num_feature_levels
lowerCAmelCase : List[str] = encoder_n_points
lowerCAmelCase : Union[str, Any] = decoder_n_points
lowerCAmelCase : Tuple = two_stage
lowerCAmelCase : Dict = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase : Union[str, Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Dict = mask_loss_coefficient
lowerCAmelCase : Any = dice_loss_coefficient
lowerCAmelCase : str = bbox_loss_coefficient
lowerCAmelCase : Tuple = giou_loss_coefficient
lowerCAmelCase : List[str] = eos_coefficient
lowerCAmelCase : Any = focal_alpha
lowerCAmelCase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase ( self ):
return self.encoder_attention_heads
@property
def lowercase ( self ):
return self.d_model
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase : str = self.__class__.model_type
return output
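# Construction sketch (illustrative values; note the constraint enforced in __init__
# above that `two_stage=True` requires `with_box_refine=True`):
#
#   config = lowerCAmelCase(num_queries=300, two_stage=True, with_box_refine=True)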
| 646
| 0
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCamelCase ( _A : str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
lowerCAmelCase : Optional[int] = F"{olid} is not a valid Open Library olid"
raise ValueError(_A )
return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print('\n'.join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""")
| 702
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Optional[int] = '</s>'
lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(snake_case__ ) , 1103 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : Dict = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : List[str] = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def lowercase ( self ):
lowerCAmelCase : List[str] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 646
| 0
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply the gaussian function elementwise to a matrix."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the kernel_size x kernel_size window of img centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a spatial gaussian kernel of the given size."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Smooth img while preserving edges by weighting each window with spatial and intensity gaussians."""
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    """Parse filename, variances and an (odd) kernel size from argv, with defaults."""
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
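# Minimal self-contained sketch on a synthetic gradient image (no file I/O; the
# kernel size and unit variances are arbitrary choices for illustration):
def _demo_bilateral_filter() -> None:
    img = np.tile(np.linspace(0.0, 1.0, 32, dtype='float32'), (32, 1))
    out = bilateral_filter(img, 1.0, 1.0, 5)
    print(out.shape, float(out.min()), float(out.max()))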
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow('input image', img)
    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow('output image', out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 646
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=[30, 30] , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=None , snake_case__=8 , snake_case__=10 , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Tuple = batch_size
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : int = is_training
lowerCAmelCase : Optional[Any] = use_labels
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = type_sequence_label_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Any = scope
lowerCAmelCase : Dict = n_targets
lowerCAmelCase : Tuple = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCAmelCase : List[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCAmelCase : int = num_patches + 1 + self.num_detection_tokens
def lowercase ( self ):
lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCAmelCase : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCAmelCase : Optional[int] = []
for i in range(self.batch_size ):
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=snake_case__ )
lowerCAmelCase : List[str] = torch.rand(self.n_targets , 4 , device=snake_case__ )
labels.append(snake_case__ )
lowerCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = YolosModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[Any] = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = YolosForObjectDetection(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(pixel_values=snake_case__ )
lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCAmelCase : str = model(pixel_values=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCamelCase : str = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCamelCase : str = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Union[str, Any] = False
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : Any = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCAmelCase : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=snake_case__ , dtype=torch.long )
lowerCAmelCase : Any = torch.ones(
self.model_tester.n_targets , 4 , device=snake_case__ , dtype=torch.float )
labels.append(snake_case__ )
lowerCAmelCase : str = labels
return inputs_dict
def lowercase ( self ):
lowerCAmelCase : int = YolosModelTester(self )
lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
# YOLOS does not use inputs_embeds
pass
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def lowercase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def lowercase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def lowercase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
def lowercase ( self ):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def lowercase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
@slow
def lowercase ( self ):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def lowercase ( self ):
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
# verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=torch_device)
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results['scores']), 5)
        self.assertTrue(torch.allclose(results['scores'], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results['labels'].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results['boxes'][0, :], expected_slice_boxes))
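# --- added usage sketch (not part of the test suite) ------------------------
# The integration test above boils down to the following inference recipe; the
# checkpoint name and post-processing call come from the test itself, while the
# image path and threshold are illustrative assumptions.
#
#     import torch
#     from PIL import Image
#     from transformers import AutoImageProcessor, YolosForObjectDetection
#
#     image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
#     processor = AutoImageProcessor.from_pretrained('hustvl/yolos-small')
#     model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small')
#     inputs = processor(images=image, return_tensors='pt')
#     with torch.no_grad():
#         outputs = model(**inputs)
#     results = processor.post_process_object_detection(
#         outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
#     # results is a dict with 'scores', 'labels' and 'boxes'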
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
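# Added note (a sketch of the mechanism, not part of the module): _LazyModule
# registers the names listed in _import_structure but only imports the backing
# submodules on first attribute access, so e.g.
#
#     from transformers.models.nezha import NezhaConfig  # config import only
#     from transformers.models.nezha import NezhaModel   # now modeling_nezha
#                                                        # (and torch) load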
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
                'See https://pypi.org/project/jieba/ for installation.')
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '\u2582\u2583')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
return len(self.sp_model )
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
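# Added note (a sketch of the round trip, not part of the file): on the encode
# side CPM maps ' '/'\n' to the placeholders '\u2582'/'\u2583' via
# `self.translator` (and pre-segments Chinese text with jieba) before
# SentencePiece runs; `_decode` above inverts that mapping when turning ids
# back into text.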
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """simple docstring"""
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
debug_launcher(test_script.main )
def lowercase ( self ):
debug_launcher(test_ops.main )
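# Added note (hedged): `debug_launcher` spawns the wrapped function in a small
# multi-process CPU launch, so these two tests exercise accelerate's
# distributed code paths for test_script/test_ops without requiring any GPU.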
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(10_00, 1_00_00_00)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
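# Added worked example: 4150 is one of the numbers solution() counts, since
# 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.
if __name__ == "__main__":
    assert digits_fifth_powers_sum(4150) == 4150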
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
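# Added usage sketch: with the module-level n=4, k=2 above, the recursion
# yields every increasing 2-element subset of 1..4.
if __name__ == "__main__":
    assert generate_all_combinations(4, 2) == [
        [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4],
    ]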
'''simple docstring'''
def is_palindrome(head) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
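# Added usage sketch: the checks above (names as reconstructed) assume singly
# linked nodes exposing `.val` and `.next`; no node class ships with this
# file, so `ListNode` below is a minimal hypothetical stand-in.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    assert is_palindrome_stack(build_list([1, 2, 2, 1])) is True
    assert is_palindrome_dict(build_list([1, 2, 3])) is False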
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
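# Added worked example: for n = 10 the square of the sum is 55**2 == 3025 and
# the sum of the squares is 385, so the difference is 2640.
if __name__ == "__main__":
    assert solution(10) == 2640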
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
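# Added usage sketch (function name as reconstructed above): with two 1 C
# charges 1 m apart, solving for the zeroed-out force returns Coulomb's
# constant itself.
if __name__ == "__main__":
    assert coulombs_law(force=0, charge1=1, charge2=1, distance=1) == {"force": 8.988E9}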
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Tuple = GPTSwaTokenizer
_lowerCamelCase : str = False
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = False
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , snake_case__ ):
lowerCAmelCase : List[Any] = 'This is a test'
lowerCAmelCase : List[Any] = 'This is a test'
return input_text, output_text
def lowercase ( self ):
lowerCAmelCase : Tuple = '<s>'
lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 2000 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowercase ( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
lowerCAmelCase : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [465, 287, 265, 631, 842] )
lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(snake_case__ )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def lowercase ( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
lowerCAmelCase : Optional[int] = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase : Tuple = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(snake_case__ , snake_case__ ):
self.assertListEqual(tokenizer.encode_fast(snake_case__ ) , snake_case__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.decode_fast(snake_case__ ) , snake_case__ )
@slow
def lowercase ( self ):
lowerCAmelCase : str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=snake_case__ , )
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def lowercase ( cls , snake_case__ , snake_case__="speaker_embeddings_path.json" , **snake_case__ ):
if speaker_embeddings_dict_path is not None:
lowerCAmelCase : Optional[int] = get_file_from_repo(
snake_case__ , snake_case__ , subfolder=kwargs.pop('subfolder' , snake_case__ ) , cache_dir=kwargs.pop('cache_dir' , snake_case__ ) , force_download=kwargs.pop('force_download' , snake_case__ ) , proxies=kwargs.pop('proxies' , snake_case__ ) , resume_download=kwargs.pop('resume_download' , snake_case__ ) , local_files_only=kwargs.pop('local_files_only' , snake_case__ ) , use_auth_token=kwargs.pop('use_auth_token' , snake_case__ ) , revision=kwargs.pop('revision' , snake_case__ ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(snake_case__ , snake_case__ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase : Optional[int] = None
else:
with open(snake_case__ ) as speaker_embeddings_json:
lowerCAmelCase : Optional[Any] = json.load(snake_case__ )
else:
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(tokenizer=snake_case__ , speaker_embeddings=snake_case__ )
def lowercase ( self , snake_case__ , snake_case__="speaker_embeddings_path.json" , snake_case__="speaker_embeddings" , snake_case__ = False , **snake_case__ , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(snake_case__ , snake_case__ , 'v2' ) , exist_ok=snake_case__ )
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase : Union[str, Any] = self._load_voice_preset(snake_case__ )
lowerCAmelCase : Dict = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , snake_case__ , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=snake_case__ , )
lowerCAmelCase : Optional[Any] = os.path.join(snake_case__ , f"{prompt_key}_{key}.npy" )
lowerCAmelCase : Optional[int] = tmp_dict
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' ) as fp:
json.dump(snake_case__ , snake_case__ )
super().save_pretrained(snake_case__ , snake_case__ , **snake_case__ )
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key], subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
            voice_preset_dict[key] = np.load(path)
return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , snake_case__=None , snake_case__=None , snake_case__="pt" , snake_case__=256 , snake_case__=False , snake_case__=True , snake_case__=False , **snake_case__ , ):
if voice_preset is not None and not isinstance(snake_case__ , snake_case__ ):
if (
isinstance(snake_case__ , snake_case__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase : Any = self._load_voice_preset(snake_case__ )
else:
if isinstance(snake_case__ , snake_case__ ) and not voice_preset.endswith('.npz' ):
lowerCAmelCase : List[str] = voice_preset + '.npz'
lowerCAmelCase : Tuple = np.load(snake_case__ )
if voice_preset is not None:
self._validate_voice_preset_dict(snake_case__ , **snake_case__ )
lowerCAmelCase : Union[str, Any] = BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
lowerCAmelCase : List[Any] = self.tokenizer(
snake_case__ , return_tensors=snake_case__ , padding='max_length' , max_length=snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , add_special_tokens=snake_case__ , **snake_case__ , )
if voice_preset is not None:
lowerCAmelCase : Tuple = voice_preset
return encoded_text
'''simple docstring'''
def is_even(number: int) -> bool:
    """simple docstring"""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
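# Added note: `number & 1` isolates the least-significant bit, which is 0 for
# every even integer (including negatives, given Python's two's-complement
# semantics for &) and 1 for every odd one, so the bitwise test replaces `% 2`.
if __name__ == "__main__":
    assert is_even(10) and is_even(-2) and not is_even(7)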
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
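# Added illustrative sketch: with hypothetical ids A = [7, 8] and B = [9], the
# overrides above produce the BERT-style layouts
#     build_inputs_with_special_tokens(A)          -> [CLS] 7 8 [SEP]
#     build_inputs_with_special_tokens(A, B)       -> [CLS] 7 8 [SEP] 9 [SEP]
#     create_token_type_ids_from_sequences(A, B)   -> [0, 0, 0, 0, 1, 1]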
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __UpperCamelCase (files, tmp_path_factory) -> Union[str, Any]:
    """simple docstring"""
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __UpperCamelCase (tmp_path, dataset_info: DatasetInfo) -> Optional[int]:
    """simple docstring"""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
assert dataset_info_yaml_dict == reloaded
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __UpperCamelCase (tmp_path, dataset_infos_dict: DatasetInfosDict) -> List[Any]:
    """simple docstring"""
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, 'README.md'))
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __UpperCamelCase ( _A : Dict , _A : int , _A : Any ) -> str:
"""simple docstring"""
lowerCAmelCase : str = tmp_path / 'cache'
lowerCAmelCase : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : List[str] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __UpperCamelCase ( _A : int , _A : List[Any] , _A : Optional[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase : int = tmp_path / 'cache'
lowerCAmelCase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
lowerCAmelCase : Tuple = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : List[Any] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def __UpperCamelCase ( _A : Any , _A : Dict , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : str = tmp_path / 'cache'
lowerCAmelCase : Optional[int] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
lowerCAmelCase : Any = features.copy() if features else default_expected_features
lowerCAmelCase : List[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __UpperCamelCase ( _A : Dict , _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : int = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
lowerCAmelCase : Optional[int] = features.copy()
lowerCAmelCase : int = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Any = tmp_path / 'cache'
lowerCAmelCase : Dict = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase ( _A : Optional[int] , _A : int , _A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : Optional[Any] = tmp_path / 'cache'
lowerCAmelCase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCAmelCase : int = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __UpperCamelCase ( _A : Any , _A : str , _A : Dict ) -> List[str]:
"""simple docstring"""
if issubclass(_A , _A ):
lowerCAmelCase : Optional[int] = jsonl_path
elif issubclass(_A , _A ):
lowerCAmelCase : Optional[Any] = [jsonl_path]
lowerCAmelCase : Any = tmp_path / 'cache'
lowerCAmelCase : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCAmelCase : Dict = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
def __UpperCamelCase ( _A : int , _A : int , _A : Optional[int]=("train",) ) -> int:
"""simple docstring"""
assert isinstance(_A , _A )
for split in splits:
lowerCAmelCase : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __UpperCamelCase ( _A : Optional[Any] , _A : int , _A : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = tmp_path / 'cache'
lowerCAmelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : Optional[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __UpperCamelCase ( _A : Optional[Any] , _A : Any , _A : Any ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : List[Any] = tmp_path / 'cache'
lowerCAmelCase : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCAmelCase : Dict = features.copy() if features else default_expected_features
lowerCAmelCase : Dict = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Dict = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase ( _A : Any , _A : Optional[Any] , _A : Union[str, Any] ) -> str:
"""simple docstring"""
if split:
lowerCAmelCase : Optional[Any] = {split: jsonl_path}
else:
lowerCAmelCase : Any = 'train'
lowerCAmelCase : Tuple = {'train': jsonl_path, 'test': jsonl_path}
lowerCAmelCase : str = tmp_path / 'cache'
lowerCAmelCase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCAmelCase : str = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    """simple docstring"""
    return json.load(buffer)


def load_json_lines(buffer):
    """simple docstring"""
    return [json.loads(line) for line in buffer]
class lowerCAmelCase :
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case__ , snake_case__ , lines=snake_case__ ).write()
buffer.seek(0 )
lowerCAmelCase : Optional[Any] = load_json_function(snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
assert isinstance(exported_content[0] , snake_case__ )
assert len(snake_case__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case__ , snake_case__ , lines=snake_case__ , orient=snake_case__ ).write()
buffer.seek(0 )
lowerCAmelCase : List[str] = load_json(snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(snake_case__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case__ , snake_case__ , lines=snake_case__ , num_proc=2 ).write()
buffer.seek(0 )
lowerCAmelCase : Any = load_json_function(snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
assert isinstance(exported_content[0] , snake_case__ )
assert len(snake_case__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case__ , snake_case__ , lines=snake_case__ , orient=snake_case__ , num_proc=2 ).write()
buffer.seek(0 )
lowerCAmelCase : List[Any] = load_json(snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(snake_case__ ) == 10
def lowercase ( self , snake_case__ ):
with pytest.raises(snake_case__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case__ , snake_case__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = tmp_path_factory.mktemp('data' ) / f"test.json.{extension}"
lowerCAmelCase : Optional[int] = str(shared_datadir / f"test_file.json.{extension}" )
JsonDatasetWriter(snake_case__ , snake_case__ , compression=snake_case__ ).write()
with fsspec.open(snake_case__ , 'rb' , compression='infer' ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
with fsspec.open(snake_case__ , 'rb' , compression='infer' ) as f:
lowerCAmelCase : Optional[int] = f.read()
assert exported_content == original_content
| 712
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def lowercase ( self , snake_case__ = "auto" ):
if slice_size == "auto":
lowerCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def lowercase ( self ):
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
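        # transcribe the raw audio into a text prompt with the Whisper speech model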
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=48_0000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps )}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
        text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="
                    f" {type(prompt )}." )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
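        # scale and decode the image latents with the VAE (1 / 0.18215 inverts the SD latent scaling)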
        latents = 1 / 0.1_8_2_1_5 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
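# Hypothetical usage sketch (the checkpoint name and audio loading are illustrative
# assumptions, not part of the original file):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion")
#   image = pipe(audio_array, sampling_rate=16_000).images[0]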
| 646
| 0
|
class SubArray:
    def __init__( self , arr ):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(',' )
    def solve_sub_array( self ):
        # Kadane-style dynamic programming: `sum_value[i]` is the best sum of a
        # subarray ending at index `i`, `rear[i]` is the best sum seen so far.
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
| 713
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        """negative_prompt""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
        """prompt_embeds""",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
    def get_dummy_components( self ):
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
return inputs
    def test_inference_text2img( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm_default_ddim( self ):
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
        max_diff = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm_default_ddim( self ):
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
        max_diff = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
| 646
| 0
|
'''simple docstring'''
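# NOTE: the checks below assume a minimal singly linked list node; the original
# file does not define one, so this small class is an assumed helper.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next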
def is_palindrome(head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
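if __name__ == "__main__":
    # assumed smoke test, not part of the original file; note that is_palindrome
    # mutates its input, so each check builds a fresh list
    def build_list(values):
        head = None
        for value in reversed(values):
            head = ListNode(value, head)
        return head
    print(is_palindrome(build_list([1, 2, 2, 1])))     # True
    print(is_palindrome_stack(build_list([1, 2, 3])))  # False
    print(is_palindrome_dict(build_list([1, 2, 1])))   # True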
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig( PretrainedConfig ):
    model_type = """xmod"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
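# e.g. for the default task the mapping above is:
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'})])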
| 646
| 0
|
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
                    [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
                    [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 715
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line ):
    """Returns the leading whitespace of `line`."""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
lowerCAmelCase : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : int = []
else:
blocks.append('\n'.join(_A ) )
lowerCAmelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key ):
    """Wraps a key function so sorting ignores case and underscores."""
    def _inner(x ):
        return key(x ).lower().replace('_' , '' )
    return _inner
def sort_objects(objects , key=None ):
    """Sorts imported objects: constants first, then classes, then functions."""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
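# e.g. sort_objects(["foo", "Bar", "BAZ"]) == ["BAZ", "Bar", "foo"]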
def sort_objects_in_import(import_statement ):
    """Sorts the objects inside a single (possibly multi-line) import statement."""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([f"\"{k}\"" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace , import_statement )
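# e.g. sort_objects_in_import('_import_structure["models"].extend(["zeta", "alpha"])')
# returns '_import_structure["models"].extend(["alpha", "zeta"])'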
def sort_imports(file , check_only=True ):
    """Sorts the `_import_structure` of one init file (returns True if it would change)."""
    with open(file , 'r' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}." )
            with open(file , 'w' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    """Checks (or fixes) the import sort of every `__init__.py` under PATH_TO_DIFFUSERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(f"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 646
| 0
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    @require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        input_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(input_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 716
|
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Tuple = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = embedding_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : int = scope
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
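# Exercise every input signature: with attention_mask and token_type_ids, with token_type_ids only,
# and with input_ids alone.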
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , next_sentence_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[Any] = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : int = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Any = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = config_and_inputs
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : str = True
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : int = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
lowerCAmelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def lowercase ( self ):
lowerCAmelCase : List[Any] = MobileBertModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return torch.tensor(
_A , dtype=torch.long , device=torch_device , )
_lowerCAmelCase : Union[str, Any] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
lowerCAmelCase : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase : Tuple = model(snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=torch_device , )
# MobileBERT outputs range from roughly 1e0 to 1e8. Even a 0.0000001% relative difference on a value of
# 1e8 yields an absolute difference of ~1, so measuring closeness by subtraction is not a good idea.
# Instead, we divide the expected result by the actual result to obtain a ratio of ~1, and then check that
# this ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCAmelCase : List[str] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCAmelCase : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 646
| 0
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_A , default='wikitext' , help='Name of the training dataset. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of dataset entries (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( _A : Optional[int] ) -> int:
"""simple docstring"""
def fn(_A : Tuple ):
return tokenizer(examples['text'] )
return fn
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_data['input_ids'] ) ):
lowerCAmelCase : Optional[Any] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
lowerCAmelCase : Any = tf.train.Features(feature=_A )
lowerCAmelCase : List[str] = tf.train.Example(features=_A )
lowerCAmelCase : Tuple = example.SerializeToString()
records.append(_A )
return records
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Optional[Any] = min(len(_A ) , args.limit )
lowerCAmelCase : Dict = dataset.select(range(_A ) )
print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Any = tokenize_function(_A )
lowerCAmelCase : Optional[int] = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_A : str ):
# Concatenate all texts.
lowerCAmelCase : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase : str = {
k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCAmelCase : List[Any] = dataset_tokenized.map(_A , batched=_A , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
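# Walk over the grouped dataset in windows of shard_size examples and write one TFRecord file per window.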
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
lowerCAmelCase : Union[str, Any] = os.path.join(_A , F"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase : List[Any] = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase : Union[str, Any] = serialized_examples[i]
out_file.write(_A )
print('Wrote file {} containing {} records'.format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
| 717
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( _A : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( _A : List[Any] , _A : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
lowerCAmelCase : List[Any] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __UpperCamelCase ( _A : str , _A : Optional[Any] , _A : Dict , _A : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : List[str] = 'imagenet-1k-id2label.json'
lowerCAmelCase : Tuple = 10_00
lowerCAmelCase : str = 'huggingface/label-files'
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase : List[str] = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
lowerCAmelCase : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCAmelCase : List[str] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCAmelCase : Tuple = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase : Any = [2, 2, 20]
lowerCAmelCase : List[str] = [3, 12, 16]
lowerCAmelCase : List[Any] = [1_92, 7_68, 10_24]
lowerCAmelCase : Union[str, Any] = CvtForImageClassification(_A )
lowerCAmelCase : str = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : List[Any] = torch.load(_A , map_location=torch.device('cpu' ) )
lowerCAmelCase : str = OrderedDict()
lowerCAmelCase : int = []
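# Collect the full rename map: cls tokens and patch embeddings per stage, then every transformer block.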
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCAmelCase : List[str] = list_of_state_dict + cls_token(_A )
lowerCAmelCase : Optional[Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
lowerCAmelCase : List[Any] = list_of_state_dict + attention(_A , _A )
lowerCAmelCase : List[str] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(gg )
for i in range(len(_A ) ):
lowerCAmelCase : Tuple = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Path to the original CvT checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 646
| 0
|
'''simple docstring'''
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowerCAmelCase ( a ):
_lowerCamelCase : str = """openai/whisper-base"""
_lowerCamelCase : int = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_lowerCamelCase : int = """transcriber"""
_lowerCamelCase : List[str] = WhisperProcessor
_lowerCamelCase : Optional[int] = WhisperForConditionalGeneration
_lowerCamelCase : List[Any] = ["""audio"""]
_lowerCamelCase : List[str] = ["""text"""]
def lowercase ( self , snake_case__ ):
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def lowercase ( self , snake_case__ ):
return self.model.generate(inputs=snake_case__ )
def lowercase ( self , snake_case__ ):
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
| 718
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCAmelCase ( a ):
_lowerCamelCase : List[str] = """xlm-roberta"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : List[Any] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
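# Multiple-choice inputs are shaped (batch, choice, sequence); all other tasks use (batch, sequence).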
if self.task == "multiple-choice":
lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 646
| 0
|
'''simple docstring'''
import math
def __UpperCamelCase ( _A : int = 1_00 ) -> int:
"""simple docstring"""
lowerCAmelCase : List[Any] = sum(i * i for i in range(1 , n + 1 ) )
lowerCAmelCase : Optional[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_A , default='wikitext' , help='Name of the training dataset. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of dataset entries (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( _A : Optional[int] ) -> int:
"""simple docstring"""
def fn(_A : Tuple ):
return tokenizer(examples['text'] )
return fn
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_data['input_ids'] ) ):
lowerCAmelCase : Optional[Any] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
lowerCAmelCase : Any = tf.train.Features(feature=_A )
lowerCAmelCase : List[str] = tf.train.Example(features=_A )
lowerCAmelCase : Tuple = example.SerializeToString()
records.append(_A )
return records
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Optional[Any] = min(len(_A ) , args.limit )
lowerCAmelCase : Dict = dataset.select(range(_A ) )
print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Any = tokenize_function(_A )
lowerCAmelCase : Optional[int] = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_A : str ):
# Concatenate all texts.
lowerCAmelCase : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase : str = {
k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCAmelCase : List[Any] = dataset_tokenized.map(_A , batched=_A , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
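# Walk over the grouped dataset in windows of shard_size examples and write one TFRecord file per window.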
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
lowerCAmelCase : Union[str, Any] = os.path.join(_A , F"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase : List[Any] = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase : Union[str, Any] = serialized_examples[i]
out_file.write(_A )
print('Wrote file {} containing {} records'.format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
| 646
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : int = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( _A : Any , _A : Dict , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
hf_model.apply_weight_norm()
lowerCAmelCase : int = checkpoint['input_conv.weight_g']
lowerCAmelCase : Optional[int] = checkpoint['input_conv.weight_v']
lowerCAmelCase : Dict = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase : Optional[Any] = checkpoint[F"upsamples.{i}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.weight_v"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
lowerCAmelCase : List[Any] = checkpoint['output_conv.1.weight_g']
lowerCAmelCase : List[str] = checkpoint['output_conv.1.weight_v']
lowerCAmelCase : Optional[Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( _A : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Any=None , _A : Any=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase : Dict = SpeechTaHifiGanConfig.from_pretrained(_A )
else:
lowerCAmelCase : Union[str, Any] = SpeechTaHifiGanConfig()
lowerCAmelCase : List[Any] = SpeechTaHifiGan(_A )
lowerCAmelCase : List[str] = torch.load(_A )
load_weights(orig_checkpoint['model']['generator'] , _A , _A )
lowerCAmelCase : Tuple = np.load(_A )
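# stats.npy is assumed to hold the feature-wise mean (row 0) and scale (row 1) used for spectrogram
# de-normalization.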
lowerCAmelCase : List[Any] = stats[0].reshape(-1 )
lowerCAmelCase : int = stats[1].reshape(-1 )
lowerCAmelCase : Union[str, Any] = torch.from_numpy(_A ).float()
lowerCAmelCase : int = torch.from_numpy(_A ).float()
model.save_pretrained(_A )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 646
| 0
|
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
def __UpperCamelCase ( _A : List[Any] , _A : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : int = set()
lowerCAmelCase : List[Any] = []
def parse_line(_A : Optional[int] ):
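# Stream through the log: indented lines belong to the current warning and are buffered; a non-indented
# line flushes the buffer as one complete warning.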
for line in fp:
if isinstance(_A , _A ):
lowerCAmelCase : Optional[int] = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_A ) > 0:
lowerCAmelCase : Optional[int] = '\n'.join(_A )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(_A )
buffer.clear()
continue
else:
lowerCAmelCase : int = line.strip()
buffer.append(_A )
if from_gh:
for filename in os.listdir(_A ):
lowerCAmelCase : Optional[int] = os.path.join(_A , _A )
if not os.path.isdir(_A ):
# read the file
if filename != "warnings.txt":
continue
with open(_A ) as fp:
parse_line(_A )
else:
try:
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_A ) as fp:
parse_line(_A )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def __UpperCamelCase ( _A : Dict , _A : Optional[int] ) -> Any:
"""simple docstring"""
lowerCAmelCase : Tuple = set()
lowerCAmelCase : Union[str, Any] = [os.path.join(_A , p ) for p in os.listdir(_A ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_A , _A ) )
return selected_warnings
if __name__ == "__main__":
def __UpperCamelCase ( _A : str ) -> int:
"""simple docstring"""
return values.split(',' )
_lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
_lowerCAmelCase : Dict = parser.parse_args()
_lowerCAmelCase : Dict = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_lowerCAmelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_lowerCAmelCase : Optional[Any] = extract_warnings(args.output_dir, args.targets)
_lowerCAmelCase : Tuple = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
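# Transpose from one reference list per prediction to sacrebleu's layout: transformed_references[i]
# holds the i-th reference of every prediction.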
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 646
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
_lowerCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowerCamelCase : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowerCamelCase : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowerCamelCase : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowerCamelCase : bool = field(default=a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowerCamelCase : Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCAmelCase :
_lowerCamelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowerCamelCase : Optional[str] = field(
default=a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowerCamelCase : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowerCamelCase : bool = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
lowerCAmelCase : Dict = import_module('tasks' )
try:
lowerCAmelCase : List[str] = getattr(_A , model_args.task_type )
lowerCAmelCase : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowerCAmelCase : Optional[Any] = token_classification_task.get_labels(data_args.labels )
lowerCAmelCase : Dict[int, str] = dict(enumerate(_A ) )
lowerCAmelCase : Optional[Any] = len(_A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_A , id2label=_A , label2id={label: i for i, label in enumerate(_A )} , cache_dir=model_args.cache_dir , )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowerCAmelCase : Optional[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase : Dict = (
TokenClassificationDataset(
token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase : Union[str, Any] = (
TokenClassificationDataset(
token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_A : np.ndarray , _A : np.ndarray ) -> Tuple[List[int], List[int]]:
lowerCAmelCase : List[Any] = np.argmax(_A , axis=2 )
        lowerCAmelCase , lowerCAmelCase : List[Any] = preds.shape
lowerCAmelCase : str = [[] for _ in range(_A )]
lowerCAmelCase : Optional[int] = [[] for _ in range(_A )]
for i in range(_A ):
for j in range(_A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
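    # Shape note (an assumption from the standard Trainer contract, not stated in this
    # file): `predictions` holds (batch_size, seq_len, num_labels) logits and `label_ids`
    # is (batch_size, seq_len) with nn.CrossEntropyLoss().ignore_index (-100) marking
    # sub-word and padding positions, which is why those positions are skipped above.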
def compute_metrics(_A : EvalPrediction ) -> Dict:
        lowerCAmelCase , lowerCAmelCase : Optional[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_A , _A ),
"precision": precision_score(_A , _A ),
"recall": recall_score(_A , _A ),
"f1": fa_score(_A , _A ),
}
# Data collator
    lowerCAmelCase : Optional[int] = DataCollatorWithPadding(_A , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
lowerCAmelCase : List[str] = Trainer(
model=_A , args=_A , train_dataset=_A , eval_dataset=_A , compute_metrics=_A , data_collator=_A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase : str = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase : Union[str, Any] = trainer.evaluate()
lowerCAmelCase : Optional[int] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
writer.write('%s = %s\n' % (key, value) )
results.update(_A )
# Predict
if training_args.do_predict:
lowerCAmelCase : Optional[Any] = TokenClassificationDataset(
token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = trainer.predict(_A )
        lowerCAmelCase , lowerCAmelCase : List[Any] = align_predictions(_A , _A )
lowerCAmelCase : Optional[Any] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_A , 'w' ) as writer:
for key, value in metrics.items():
                    logger.info(' %s = %s' , key , value )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
lowerCAmelCase : List[Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_A , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_A , _A , _A )
return results
def __UpperCamelCase ( _A : Optional[Any] ) -> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 700
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Union[str, Any] = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def lowercase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
f"got {self.rope_scaling}" )
lowerCAmelCase : List[Any] = self.rope_scaling.get('type' , snake_case__ )
lowerCAmelCase : List[str] = self.rope_scaling.get('factor' , snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 646
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowerCAmelCase : List[Any] = {'tokenization_herbert': ['HerbertTokenizer']}
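# Lazy-import convention: this mapping of submodule name -> public symbols is handed to
# `_LazyModule` at the bottom of the file so the heavy imports run only on first access.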
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Any = """deformable_detr"""
_lowerCamelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0_2 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.2_5 , snake_case__=False , **snake_case__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = backbone_config.get('model_type' )
lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[Any] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Union[str, Any] = use_timm_backbone
lowerCAmelCase : List[Any] = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : int = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : List[str] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : str = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Any = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = auxiliary_loss
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : List[str] = num_feature_levels
lowerCAmelCase : List[str] = encoder_n_points
lowerCAmelCase : Union[str, Any] = decoder_n_points
lowerCAmelCase : Tuple = two_stage
lowerCAmelCase : Dict = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase : Union[str, Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Dict = mask_loss_coefficient
lowerCAmelCase : Any = dice_loss_coefficient
lowerCAmelCase : str = bbox_loss_coefficient
lowerCAmelCase : Tuple = giou_loss_coefficient
lowerCAmelCase : List[str] = eos_coefficient
lowerCAmelCase : Any = focal_alpha
lowerCAmelCase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase ( self ):
return self.encoder_attention_heads
@property
def lowercase ( self ):
return self.d_model
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase : str = self.__class__.model_type
return output
| 646
| 0
|
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 256
# Modulus to hash a string
_lowerCAmelCase : List[Any] = 100_0003
def __UpperCamelCase ( _A : str , _A : str ) -> bool:
"""simple docstring"""
lowerCAmelCase : Tuple = len(_A )
lowerCAmelCase : List[Any] = len(_A )
if p_len > t_len:
return False
lowerCAmelCase : Any = 0
lowerCAmelCase : List[str] = 0
lowerCAmelCase : List[str] = 1
# Calculating the hash of pattern and substring of text
for i in range(_A ):
lowerCAmelCase : List[Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase : Tuple = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase : int = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase : Tuple = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
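# Worked sketch of the rolling-hash update above (illustrative values): with
# alphabet_size = 256 and p_len = 3, hash('abc') = ((ord('a')*256 + ord('b'))*256
# + ord('c')) mod m. Sliding the window to 'bcd' subtracts ord('a') * 256**2 (the
# precomputed modulus_power term), multiplies by 256 and adds ord('d'), an O(1) step
# instead of rehashing all p_len characters.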
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : List[str] = 'abc1abc12'
lowerCAmelCase : Dict = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
lowerCAmelCase : Union[str, Any] = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_A , _A ) and not rabin_karp(_A , _A )
# Test 2)
lowerCAmelCase : str = 'ABABX'
lowerCAmelCase : Optional[Any] = 'ABABZABABYABABX'
assert rabin_karp(_A , _A )
# Test 3)
lowerCAmelCase : int = 'AAAB'
lowerCAmelCase : int = 'ABAAAAAB'
assert rabin_karp(_A , _A )
# Test 4)
lowerCAmelCase : List[str] = 'abcdabcy'
lowerCAmelCase : Tuple = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_A , _A )
# Test 5)
lowerCAmelCase : List[Any] = 'Lü'
lowerCAmelCase : Optional[Any] = 'Lüsai'
assert rabin_karp(_A , _A )
lowerCAmelCase : Optional[Any] = 'Lue'
assert not rabin_karp(_A , _A )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 702
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : str = PegasusTokenizer
_lowerCamelCase : Union[str, Any] = PegasusTokenizerFast
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[Any] = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Optional[int] = '</s>'
lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(vocab_keys ) , 1103 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : Dict = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Optional[Any] = PegasusTokenizer
_lowerCamelCase : str = PegasusTokenizerFast
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : int = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : List[str] = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2 # input_ids, attention_mask.
def lowercase ( self ):
lowerCAmelCase : List[str] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
        lowerCAmelCase : Tuple = self._large_tokenizer(raw_input_str ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 646
| 0
|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 703
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCamelCase ( _A : np.ndarray , _A : float ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = math.sqrt(_A )
lowerCAmelCase : Union[str, Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCamelCase ( _A : np.ndarray , _A : int , _A : int , _A : int ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : int = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCamelCase ( _A : int , _A : float ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : Dict = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _A ):
for j in range(0 , _A ):
lowerCAmelCase : Optional[int] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_A , _A )
def __UpperCamelCase ( _A : np.ndarray , _A : float , _A : float , _A : int , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : str = np.zeros(img.shape )
lowerCAmelCase : int = get_gauss_kernel(_A , _A )
lowerCAmelCase , lowerCAmelCase : Dict = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowerCAmelCase : int = get_slice(_A , _A , _A , _A )
lowerCAmelCase : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowerCAmelCase : str = vec_gaussian(_A , _A )
lowerCAmelCase : Optional[int] = np.multiply(_A , _A )
lowerCAmelCase : str = np.multiply(_A , _A )
lowerCAmelCase : Union[str, Any] = np.sum(_A ) / np.sum(_A )
lowerCAmelCase : Tuple = val
return imga
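# In short: every output pixel is a weighted mean of its kernel_size x kernel_size
# neighbourhood, with weights given by the product of a fixed spatial Gaussian
# (get_gauss_kernel) and an intensity Gaussian of the local value differences, so
# neighbourhoods with large intensity jumps (edges) are smoothed far less.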
def __UpperCamelCase ( _A : list ) -> tuple:
"""simple docstring"""
lowerCAmelCase : List[Any] = args[1] if args[1:] else '../image_data/lena.jpg'
lowerCAmelCase : Any = float(args[2] ) if args[2:] else 1.0
lowerCAmelCase : Union[str, Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowerCAmelCase : int = int(args[4] )
lowerCAmelCase : Optional[Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowerCAmelCase : Optional[int] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = parse_args(sys.argv)
_lowerCAmelCase : str = cva.imread(filename, 0)
cva.imshow('input image', img)
_lowerCAmelCase : Union[str, Any] = img / 255
_lowerCAmelCase : List[str] = out.astype('float32')
_lowerCAmelCase : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_lowerCAmelCase : Union[str, Any] = out * 255
    _lowerCAmelCase : Optional[Any] = np.uint8(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 646
| 0
|
'''simple docstring'''
_lowerCAmelCase : dict[tuple[int, int, int], int] = {}
def __UpperCamelCase ( _A : int , _A : int , _A : int ) -> int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowerCAmelCase : Union[str, Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowerCAmelCase : str = _calculate(days - 1 , _A , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowerCAmelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowerCAmelCase : int = _calculate(days - 1 , _A , 0 )
lowerCAmelCase : Any = state_late + state_absent + state_ontime
lowerCAmelCase : Any = prizestrings
return prizestrings
def __UpperCamelCase ( _A : int = 30 ) -> int:
"""simple docstring"""
return _calculate(_A , absent=0 , late=0 )
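# Sanity check taken from the Project Euler 191 statement: a 4-day period admits
# exactly 43 valid prize strings, so the solver above should return 43 for days=4.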
if __name__ == "__main__":
print(solution())
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : int = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 646
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase : List[str] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
_lowerCAmelCase : Optional[int] = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
_lowerCAmelCase : List[Any] = '▁'
class lowerCAmelCase ( a ):
_lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
_lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowerCAmelCase : Dict = vocab_file
lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
lowerCAmelCase : Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowerCAmelCase : List[str] = len(self.sp_model ) - 1
lowerCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase ( self , snake_case__ , snake_case__ = None ):
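        # BARThez uses the RoBERTa-style special-token layout: `<s> A </s>` for a single
        # sequence and `<s> A </s></s> B </s>` for a pair, which is what the two branches
        # below construct.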
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : int = [self.cls_token_id]
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def lowercase ( self , snake_case__ , snake_case__ = None ):
lowerCAmelCase : Optional[int] = [self.sep_token_id]
lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase ( self ):
return len(self.sp_model )
def lowercase ( self ):
lowerCAmelCase : int = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self , snake_case__ ):
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def lowercase ( self , snake_case__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase : Dict = self.sp_model.PieceToId(snake_case__ )
return spm_id if spm_id else self.unk_token_id
def lowercase ( self , snake_case__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(snake_case__ )
def lowercase ( self , snake_case__ ):
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Optional[int] = ''
lowerCAmelCase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : List[Any] = []
else:
current_sub_tokens.append(snake_case__ )
lowerCAmelCase : str = False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def __getstate__( self ):
lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase : Union[str, Any] = None
return state
def __setstate__( self , snake_case__ ):
lowerCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self , snake_case__ , snake_case__ = None ):
if not os.path.isdir(snake_case__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase : List[str] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
lowerCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 705
|
'''simple docstring'''
from typing import Any
class lowerCAmelCase :
def __init__( self , snake_case__ ):
lowerCAmelCase : Optional[int] = data
lowerCAmelCase : Optional[Any] = None
def __repr__( self ):
return f"Node({self.data})"
class lowerCAmelCase :
def __init__( self ):
lowerCAmelCase : Dict = None
def __iter__( self ):
lowerCAmelCase : Optional[Any] = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase : Any = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : List[str] = current.next
lowerCAmelCase : int = data
def lowercase ( self , snake_case__ ):
self.insert_nth(len(self ) , snake_case__ )
def lowercase ( self , snake_case__ ):
self.insert_nth(0 , snake_case__ )
def lowercase ( self , snake_case__ , snake_case__ ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase : List[str] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : int = new_node
elif index == 0:
lowerCAmelCase : List[Any] = self.head # link new_node to head
lowerCAmelCase : List[Any] = new_node
else:
lowerCAmelCase : List[Any] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Any = temp.next
lowerCAmelCase : str = new_node
def lowercase ( self ): # print every node data
print(self )
def lowercase ( self ):
return self.delete_nth(0 )
def lowercase ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def lowercase ( self , snake_case__ = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase : List[str] = self.head # default first node
if index == 0:
lowerCAmelCase : Tuple = self.head.next
else:
lowerCAmelCase : Dict = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Tuple = temp.next
lowerCAmelCase : Dict = temp.next
lowerCAmelCase : Tuple = temp.next.next
return delete_node.data
def lowercase ( self ):
return self.head is None
def lowercase ( self ):
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Any = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[str] = current.next
# Make the current node's next point backwards
lowerCAmelCase : int = prev
# Make the previous node be the current node
lowerCAmelCase : int = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : Optional[Any] = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : List[Any] = prev
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(_A ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_A ) == i
linked_list.insert_nth(_A , i + 1 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_A ) == "->".join(str(_A ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_A ) == 9
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_A ) == "->".join(str(_A ) for i in range(-8 , 1 ) )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Optional[int] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase : Dict = LinkedList()
for i in test_input:
linked_list.insert_tail(_A )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_A ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : Optional[Any] = linked_list.delete_head()
assert result == -9
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : List[str] = linked_list.delete_tail()
assert result == 12.2
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_A )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_A )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_A )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
lowerCAmelCase : Tuple = input('Enter New Value: ' ).strip()
print('New list:' )
print(_A )
print(F"length of linked_list is : {len(_A )}" )
if __name__ == "__main__":
main()
| 646
| 0
|
'''simple docstring'''
import re
def __UpperCamelCase ( _A : str ) -> bool:
"""simple docstring"""
lowerCAmelCase : Tuple = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
if match := re.search(_A , _A ):
return match.string == phone
return False
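# Illustrative behaviour of the pattern above: '+918827897895', '918827897895' and
# '09876543210' validate, while numbers whose first significant digit is not 7, 8 or 9
# (for example '+916543210987') are rejected.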
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 706
|
'''simple docstring'''
_lowerCAmelCase : List[str] = {str(digit): digit**5 for digit in range(10)}
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_A ) )
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(_A ) )
if __name__ == "__main__":
print(solution())
| 646
| 0
|
'''simple docstring'''
from typing import Any
class lowerCAmelCase :
def __init__( self , snake_case__ ):
lowerCAmelCase : Optional[int] = data
lowerCAmelCase : Optional[Any] = None
def __repr__( self ):
return f"Node({self.data})"
class lowerCAmelCase :
def __init__( self ):
lowerCAmelCase : Dict = None
def __iter__( self ):
lowerCAmelCase : Optional[Any] = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase : Any = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : List[str] = current.next
lowerCAmelCase : int = data
def lowercase ( self , snake_case__ ):
self.insert_nth(len(self ) , snake_case__ )
def lowercase ( self , snake_case__ ):
self.insert_nth(0 , snake_case__ )
def lowercase ( self , snake_case__ , snake_case__ ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase : List[str] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : int = new_node
elif index == 0:
lowerCAmelCase : List[Any] = self.head # link new_node to head
lowerCAmelCase : List[Any] = new_node
else:
lowerCAmelCase : List[Any] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Any = temp.next
lowerCAmelCase : str = new_node
def lowercase ( self ): # print every node data
print(self )
def lowercase ( self ):
return self.delete_nth(0 )
def lowercase ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def lowercase ( self , snake_case__ = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase : List[str] = self.head # default first node
if index == 0:
lowerCAmelCase : Tuple = self.head.next
else:
lowerCAmelCase : Dict = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Tuple = temp.next
lowerCAmelCase : Dict = temp.next
lowerCAmelCase : Tuple = temp.next.next
return delete_node.data
def lowercase ( self ):
return self.head is None
def lowercase ( self ):
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Any = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[str] = current.next
# Make the current node's next point backwards
lowerCAmelCase : int = prev
# Make the previous node be the current node
lowerCAmelCase : int = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : Optional[Any] = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : List[Any] = prev
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(_A ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_A ) == i
linked_list.insert_nth(_A , i + 1 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_A ) == "->".join(str(_A ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_A ) == 9
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_A ) == "->".join(str(_A ) for i in range(-8 , 1 ) )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Optional[int] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase : Dict = LinkedList()
for i in test_input:
linked_list.insert_tail(_A )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_A ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : Optional[Any] = linked_list.delete_head()
assert result == -9
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : List[str] = linked_list.delete_tail()
assert result == 12.2
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_A )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_A )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_A )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
lowerCAmelCase : Tuple = input('Enter New Value: ' ).strip()
print('New list:' )
print(_A )
print(F"length of linked_list is : {len(_A )}" )
if __name__ == "__main__":
main()
| 707
|
'''simple docstring'''
def __UpperCamelCase ( _A : List[str] ) -> Optional[Any]:
"""simple docstring"""
if not head:
return True
# split the list to two parts
lowerCAmelCase , lowerCAmelCase : str = head.next, head
while fast and fast.next:
lowerCAmelCase : Optional[int] = fast.next.next
lowerCAmelCase : int = slow.next
lowerCAmelCase : int = slow.next
    lowerCAmelCase : Optional[Any] = None # detach the first half from the second (the comparison below still works without this)
# reverse the second part
lowerCAmelCase : List[Any] = None
while second:
lowerCAmelCase : List[Any] = second.next
lowerCAmelCase : Union[str, Any] = node
lowerCAmelCase : Optional[Any] = second
lowerCAmelCase : Any = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
lowerCAmelCase : Optional[Any] = node.next
lowerCAmelCase : Tuple = head.next
return True
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
lowerCAmelCase : Optional[int] = head
while fast and fast.next:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
lowerCAmelCase : Tuple = [slow.val]
while slow.next:
lowerCAmelCase : Tuple = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
lowerCAmelCase : Union[str, Any] = cur.next
return True
def __UpperCamelCase ( _A : Tuple ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
lowerCAmelCase : Optional[int] = {}
lowerCAmelCase : int = 0
while head:
if head.val in d:
d[head.val].append(_A )
else:
lowerCAmelCase : Any = [pos]
lowerCAmelCase : int = head.next
pos += 1
lowerCAmelCase : str = pos - 1
lowerCAmelCase : Optional[Any] = 0
for v in d.values():
if len(_A ) % 2 != 0:
middle += 1
else:
lowerCAmelCase : Any = 0
for i in range(0 , len(_A ) ):
if v[i] + v[len(_A ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
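# Trade-off between the three variants above: the reversal-based check runs in O(n)
# time with O(1) extra space but rewires the second half of the list, while the stack
# and position-dictionary variants leave the list intact at the cost of O(n) memory.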
| 646
| 0
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : Optional[int] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_lowerCAmelCase : Tuple = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = 1 , snake_case__ = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case__ , hypotheses=snake_case__ , min_len=snake_case__ , max_len=snake_case__ )
}
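# Equivalence sketch (added for illustration, not part of the metric file): the
# compute method above is a thin wrapper over NLTK, so the same score can be
# reproduced directly; the token lists here are made-up inputs.
def _gleu_sketch() -> float:
    hyp = ['the', 'cat', 'sat', 'on', 'the', 'mat']
    ref = ['the', 'cat', 'is', 'on', 'the', 'mat']
    # corpus_gleu expects one list of reference token-lists per hypothesis
    return gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)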
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
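# Closed-form alternative (added for illustration): with sum(1..n) = n(n + 1) / 2
# and sum of squares = n(n + 1)(2n + 1) / 6, the same answer comes out in O(1).
def solution_closed_form(n: int = 100) -> int:
    sum_to_n = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_to_n**2 - sum_of_squares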
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be multiplied together before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be summed before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
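# Worked examples (added for illustration): 217 -> 2*1*7 = 14 -> 1*4 = 4 gives a
# multiplicative persistence of 2, and 199 -> 1+9+9 = 19 -> 10 -> 1 gives an
# additive persistence of 3, i.e. multiplicative_persistence(217) == 2 and
# additive_persistence(199) == 3.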
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Tuple = GPTSwaTokenizer
_lowerCamelCase : str = False
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = False
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , snake_case__ ):
lowerCAmelCase : List[Any] = 'This is a test'
lowerCAmelCase : List[Any] = 'This is a test'
return input_text, output_text
def lowercase ( self ):
lowerCAmelCase : Tuple = '<s>'
lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 2000 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowercase ( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCAmelCase : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [465, 287, 265, 631, 842] )
lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(snake_case__ )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def lowercase ( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCAmelCase : Optional[int] = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase : Tuple = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(snake_case__ , snake_case__ ):
self.assertListEqual(tokenizer.encode_fast(snake_case__ ) , snake_case__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.decode_fast(snake_case__ ) , snake_case__ )
@slow
def lowercase ( self ):
lowerCAmelCase : str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=snake_case__ , )
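# Note (added for illustration): the integration test above targets the public
# checkpoint id 'AI-Sweden/gpt-sw3-126m'; outside the test suite the same
# tokenizer would normally be loaded via AutoTokenizer.from_pretrained on that id
# (assumes Hub access).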
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and write it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
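# Example invocation (added for illustration; the script filename is a placeholder):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# fire.Fire(convert) maps the function's arguments onto CLI positionals and flags.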
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if ``number`` is even, by testing its lowest bit."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
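# Why the bit trick works (added for illustration): `number & 1` isolates the
# least significant bit, which is 0 exactly for even integers. Python's
# arbitrary-precision ints behave like two's complement here, so (-4) & 1 == 0
# and (-3) & 1 == 1, meaning the check is also correct for negative numbers.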
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
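# Behavior note (added for illustration): at import time the package module is
# swapped for a _LazyModule proxy, so torch-dependent symbols such as GitModel
# are only actually imported on first attribute access.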
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __UpperCamelCase ( _A : str , _A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowerCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(_A )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __UpperCamelCase ( _A : str , _A : DatasetInfo ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : str = str(_A )
dataset_info.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfo.from_directory(_A )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_A , 'dataset_info.json' ) )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : Tuple = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
lowerCAmelCase : Optional[int] = dataset_info._to_yaml_dict()
assert sorted(_A ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowerCAmelCase : Any = yaml.safe_dump(_A )
lowerCAmelCase : int = yaml.safe_load(_A )
assert dataset_info_yaml_dict == reloaded
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = DatasetInfo()
lowerCAmelCase : List[Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __UpperCamelCase ( _A : Tuple , _A : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Tuple = str(_A )
dataset_infos_dict.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfosDict.from_directory(_A )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCAmelCase : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCAmelCase : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_A , 'README.md' ) )
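# Context note (added for illustration): DatasetInfosDict round-trips through the
# YAML front matter of README.md (the '---\ndataset_info:\n ...\n---' block written
# in the first fixture above), which is why both README.md and the legacy
# dataset_infos.json are accepted as sources.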
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively."""
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the ``node_count``-th Catalan number, i.e. the number of binary search trees on that many nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n! for non-negative n."""
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Return the number of distinct binary trees on ``node_count`` labeled nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
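# Sanity check (added for illustration): the Catalan numbers start 1, 1, 2, 5, 14,
# so three nodes admit catalan_number(3) == 5 binary search trees and
# binary_tree_count(3) == 5 * 3! == 30 distinct binary trees.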
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowerCAmelCase ( a ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=snake_case__ , speech_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , )
def lowercase ( self , snake_case__ = "auto" ):
if slice_size == "auto":
lowerCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def lowercase ( self ):
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__=1_6000 , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , **snake_case__ , ):
lowerCAmelCase : List[str] = self.speech_processor.feature_extractor(
snake_case__ , return_tensors='pt' , sampling_rate=snake_case__ ).input_features.to(self.device )
lowerCAmelCase : Optional[Any] = self.speech_model.generate(snake_case__ , max_length=48_0000 )
lowerCAmelCase : str = self.speech_processor.tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , normalize=snake_case__ )[
0
]
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = 1
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = len(snake_case__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case__ )}." )
# get prompt text embeddings
lowerCAmelCase : str = self.tokenizer(
snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCAmelCase : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = text_embeddings.shape
lowerCAmelCase : Any = text_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase : List[str]
if negative_prompt is None:
lowerCAmelCase : Any = [''] * batch_size
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !="
f" {type(snake_case__ )}." )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
lowerCAmelCase : Dict = negative_prompt
lowerCAmelCase : Optional[int] = text_input_ids.shape[-1]
lowerCAmelCase : int = self.tokenizer(
snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , )
lowerCAmelCase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : List[Any] = uncond_embeddings.shape[1]
lowerCAmelCase : List[str] = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase : str = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase : Tuple = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Union[str, Any] = {}
if accepts_eta:
lowerCAmelCase : int = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : Tuple = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : List[str] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase : Dict = noise_pred.chunk(2 )
lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : int = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase : Dict = self.vae.decode(snake_case__ ).sample
lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
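# Usage sketch (added for illustration, not from the original file): the pipeline
# transcribes raw audio with Whisper and feeds the transcript to Stable Diffusion.
# The positional audio and sampling-rate arguments follow the __call__ signature
# above; the audio loading and file name are assumptions.
#
#   import librosa
#   audio, sr = librosa.load('spoken_prompt.wav', sr=16000)
#   image = pipe(audio, sr, num_inference_steps=50).images[0]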
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __UpperCamelCase ( _A : str , _A : Dict ) -> Tuple:
"""simple docstring"""
inspect_dataset(_A , _A )
lowerCAmelCase : str = path + '.py'
assert script_name in os.listdir(_A )
assert "__pycache__" not in os.listdir(_A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __UpperCamelCase ( _A : List[str] , _A : str ) -> Optional[int]:
"""simple docstring"""
inspect_metric(_A , _A )
lowerCAmelCase : Tuple = path + '.py'
assert script_name in os.listdir(_A )
assert "__pycache__" not in os.listdir(_A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __UpperCamelCase ( _A : Optional[int] , _A : str , _A : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : Tuple = get_dataset_config_info(_A , config_name=_A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __UpperCamelCase ( _A : List[Any] , _A : Tuple , _A : int ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(_A ):
get_dataset_config_info(_A , config_name=_A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __UpperCamelCase ( _A : Tuple , _A : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase : Dict = get_dataset_config_names(_A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __UpperCamelCase ( _A : Dict , _A : List[Any] , _A : Optional[int] ) -> Any:
"""simple docstring"""
lowerCAmelCase : Optional[int] = get_dataset_infos(_A )
assert list(infos.keys() ) == expected_configs
lowerCAmelCase : Union[str, Any] = expected_configs[0]
assert expected_config in infos
lowerCAmelCase : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __UpperCamelCase ( _A : Optional[int] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = get_dataset_infos(_A )
assert expected_config in infos
lowerCAmelCase : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __UpperCamelCase ( _A : str , _A : int , _A : Optional[int] ) -> List[Any]:
"""simple docstring"""
with pytest.raises(_A ):
get_dataset_split_names(_A , config_name=_A )
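# Interactive equivalent (added for illustration): the helpers under test can be
# called directly, e.g. get_dataset_split_names('squad', config_name='plain_text')
# returns ['train', 'validation'] according to the parametrization above.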
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[Any] = LDMTextToImagePipeline
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : str = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : str = CLIPTextModel(snake_case__ )
lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[Any] = self.get_dummy_components()
lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
lowerCAmelCase : List[Any] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Tuple = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images[0]
lowerCAmelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
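# Naming note (added for illustration): LDMTextToImagePipeline registers its
# autoencoder and text encoder under 'vqvae' and 'bert' (see get_dummy_components
# above), unlike Stable Diffusion's 'vae'/'text_encoder' convention.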
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 1, emulating a logical AND gate."""
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    """Exercise the full truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """xmod"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=("en_XX",) , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Optional[Any] = use_cache
lowerCAmelCase : Union[str, Any] = classifier_dropout
lowerCAmelCase : int = pre_norm
lowerCAmelCase : Optional[Any] = adapter_reduction_factor
lowerCAmelCase : Any = adapter_layer_norm
lowerCAmelCase : Dict = adapter_reuse_layer_norm
lowerCAmelCase : Any = ln_before_adapter
lowerCAmelCase : Optional[Any] = list(snake_case__ )
lowerCAmelCase : List[Any] = default_language
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
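# Usage sketch (added for illustration; the class name XmodConfig is inferred
# from model_type = 'xmod' above, and the argument values are assumptions):
#   config = XmodConfig(vocab_size=30522, languages=('en_XX', 'de_DE'), default_language='en_XX')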
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Dict = logging.get_logger()
@dataclass
class lowerCAmelCase :
_lowerCamelCase : nn.Module
_lowerCamelCase : List[nn.Module] = field(default_factory=a )
_lowerCamelCase : list = field(default_factory=a )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case__ )
def __call__( self , snake_case__ ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def lowercase ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase :
_lowerCamelCase : nn.Module
_lowerCamelCase : nn.Module
_lowerCamelCase : int = 0
_lowerCamelCase : List = field(default_factory=a )
_lowerCamelCase : List = field(default_factory=a )
def __call__( self , snake_case__ ):
lowerCAmelCase : Union[str, Any] = Tracker(self.dest )(snake_case__ ).parametrized
lowerCAmelCase : Optional[Any] = Tracker(self.src )(snake_case__ ).parametrized
lowerCAmelCase : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) )
lowerCAmelCase : List[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ):
raise Exception(
f"Numbers of operations are different. Source module has {len(snake_case__ )} operations while"
f" destination module has {len(snake_case__ )}." )
for dest_m, src_m in zip(snake_case__ , snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
def __UpperCamelCase ( _A : str , _A : ResNetConfig , _A : Path , _A : bool = True ) -> Optional[Any]:
"""simple docstring"""
print(F"Converting {name}..." )
with torch.no_grad():
lowerCAmelCase : Any = timm.create_model(_A , pretrained=_A ).eval()
lowerCAmelCase : Optional[int] = ResNetForImageClassification(_A ).eval()
lowerCAmelCase : Optional[Any] = ModuleTransfer(src=_A , dest=_A )
lowerCAmelCase : List[Any] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(_A )
assert torch.allclose(from_model(_A ) , our_model(_A ).logits ), "The model logits don't match the original one."
lowerCAmelCase : Union[str, Any] = F"resnet{'-'.join(name.split('resnet' ) )}"
print(_A )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_A , )
# we can use the convnext one
lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_A , )
print(F"Pushed {checkpoint_name}" )
def __UpperCamelCase ( _A : Path , _A : str = None , _A : bool = True ) -> str:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 'imagenet-1k-id2label.json'
lowerCAmelCase : Dict = 10_00
lowerCAmelCase : List[Any] = (1, num_labels)
lowerCAmelCase : Any = 'huggingface/label-files'
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase : Any = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase : Tuple = idalabel
lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
lowerCAmelCase : Dict = partial(_A , num_labels=_A , idalabel=_A , labelaid=_A )
lowerCAmelCase : str = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(_A , names_to_config[model_name] , _A , _A )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_A , _A , _A , _A )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_lowerCAmelCase : List[Any] = parser.parse_args()
_lowerCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
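# Example invocation (added for illustration; the script filename and output path
# are placeholders, the flag names come from the argparse definitions above):
#   python convert_resnet_timm_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted
# Omitting --model_name converts every architecture in names_to_config.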
'''simple docstring'''
import argparse
import os
import re
_lowerCAmelCase : Dict = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def __UpperCamelCase ( _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Any = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
lowerCAmelCase : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : int = []
else:
blocks.append('\n'.join(_A ) )
lowerCAmelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __UpperCamelCase ( _A : Dict ) -> List[Any]:
"""simple docstring"""
def _inner(_A : Tuple ):
return key(_A ).lower().replace('_' , '' )
return _inner
def __UpperCamelCase ( _A : Union[str, Any] , _A : Any=None ) -> Optional[Any]:
"""simple docstring"""
def noop(_A : Any ):
return x
if key is None:
lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : str = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[str] = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_A )[0].isupper()]
lowerCAmelCase : Tuple = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def __UpperCamelCase ( _A : Union[str, Any] ) -> int:
"""simple docstring"""
def _replace(_A : List[Any] ):
lowerCAmelCase : List[Any] = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase : Dict = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_A )] ) + "]"
lowerCAmelCase : Optional[int] = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(_A , key=lambda _A : x[1] )
lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : int = keys[:-1]
lowerCAmelCase : Tuple = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` blocks of one init file (optionally only reporting whether it would change)."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` found under the transformers source tree."""
    failures = []
    # PATH_TO_TRANSFORMERS is the source-root constant defined near the top of this script.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=64,
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
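

# A minimal usage sketch (assuming `transformers` is installed): configure a model for
# 24-step-ahead forecasting and inspect the derived sizes.
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     assert config.context_length == 24        # defaults to prediction_length
#     assert config.feature_size == 1 * 7 + 2   # input_size * len(lags_sequence) + _number_of_features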
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
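

# A small numeric illustration of the ratio check above: with TOLERANCE = 1e-3, an
# expected value of 1.000e8 against a model output of 1.0005e8 gives a ratio of
# ~0.9995, which stays inside [1 - TOLERANCE, 1 + TOLERANCE] and passes, even though
# the absolute difference is 5e4.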
'''
Project Euler problem 30: find the sum of all numbers that can be written as the sum
of fifth powers of their digits.
'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum all numbers in [1000, 1000000) that equal the fifth-power sum of their own digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
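

# For example, 4150 is counted because 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.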
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename the patch-embedding weights of stage `idx` in the original CvT checkpoint."""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Rename the attention, MLP and layernorm weights of block `cnt` in stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Rename the cls-token parameter of the final stage."""
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def final():
    """Rename the final layernorm and the classification head."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original Microsoft CvT checkpoint into a HuggingFace CvtForImageClassification checkpoint."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
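
# Example invocation (the script filename is hypothetical; the checkpoint itself comes
# from the model zoo linked above):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384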
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_main_input_name(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
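

# A minimal usage sketch (assuming the standard `transformers` OnnxConfig constructor,
# `OnnxConfig(config, task=...)`): the property above supplies the dynamic axes an
# ONNX exporter needs.
#
#     config = XLMRobertaConfig()  # defaults reproduce the base architecture
#     onnx_config = XLMRobertaOnnxConfig(config, task="default")
#     print(onnx_config.inputs)    # OrderedDict with input_ids / attention_mask axes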
'''
Evaluate a polynomial, given its coefficients, at a point: naively and with Horner's method.
'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients `poly` (lowest degree first) at `x`."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply and one add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
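
# For poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, both calls print 79800.0:
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000, which Horner computes
# as (((7.0 * 10 + 9.3) * 10 + 5.0) * 10 + 0.0) * 10 + 0.0.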
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the dataset. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard."
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging)."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Build a `datasets.map`-compatible closure that tokenizes the `text` column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize a batch of tokenized samples into TFRecord-ready `tf.train.Example` byte strings."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
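
    # A worked example of the chunking above: with args.max_length = 512 and a
    # concatenated batch of 1300 tokens, total_length is floored to 1024 and two
    # 512-token chunks are produced, dropping the 276-token remainder.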
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
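
# Example invocation (the script filename is hypothetical; the flags are the ones
# defined in parse_args above):
#   python prepare_tfrecord_shards.py --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 --split train \
#       --shard_size 1000 --max_length 512 --output_dir tf-tpu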