| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca ( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        elif name.split('''.''' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
    return proj_weight
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb ( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict ( dict_path ):
    with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
        lines = f.readlines()
        words = [line.split(''' ''' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('''embed_out''' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) , '''w''' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 92 |
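The script above is, at its core, a key-remapping exercise: walk each fairseq state-dict entry, translate its name through `MAPPING`, and copy the tensor into the matching `transformers` attribute path. A minimal, runnable sketch of that core pattern (all names here are illustrative, not the script's actual API):

```python
import torch
from torch import nn

def copy_param(root: nn.Module, dotted_path: str, value: torch.Tensor) -> None:
    """Walk `root` along a dotted attribute path and copy `value` into the leaf tensor."""
    *parents, leaf = dotted_path.split(".")
    module = root
    for name in parents:
        module = getattr(module, name)
    target = getattr(module, leaf)
    assert target.shape == value.shape, f"{dotted_path}: {target.shape} vs {value.shape}"
    target.data.copy_(value)

# Toy usage: rename "fc1" -> "dense" while copying weights between two models.
src = nn.Module(); src.fc1 = nn.Linear(4, 4)
dst = nn.Module(); dst.dense = nn.Linear(4, 4)
mapping = {"fc1": "dense"}
for key, tensor in src.state_dict().items():
    prefix, _, rest = key.partition(".")  # e.g. "fc1.weight" -> ("fc1", "weight")
    copy_param(dst, f"{mapping[prefix]}.{rest}", tensor)
```

Keeping the shape assertion next to the copy catches mapping mistakes at conversion time instead of surfacing later as silent accuracy loss.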
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure ( config ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption ( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output ( self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 92 | 1 |
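The `pytest_configure` hook above registers custom markers so pytest does not emit unknown-marker warnings. Paired with that conftest, a test opts into a marker like this (hypothetical test body):

```python
import pytest

@pytest.mark.is_staging_test
def test_upload_roundtrip():
    # Runs only when the staging suite is selected, e.g. `pytest -m is_staging_test`.
    assert True
```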
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution ( n : str = N ) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 92 |
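The brute-force `solution` recomputes each 13-digit product from scratch. A rolling product that divides out the digit leaving the window and resets on zeros covers the same search in one pass; a sketch (function name is illustrative, `N` is the digit string defined above):

```python
def largest_window_product(digits: str, window: int = 13) -> int:
    best, product, run = 0, 1, 0  # `run` counts consecutive non-zero digits seen
    for i, ch in enumerate(digits):
        d = int(ch)
        if d == 0:
            product, run = 1, 0  # a zero kills every window containing it
            continue
        product *= d
        run += 1
        if run > window:
            product //= int(digits[i - window])  # slide: divide out the digit leaving the window
            run = window
        if run == window:
            best = max(best, product)
    return best

print(largest_window_product(N))
```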
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale ( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad ( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        '''simple docstring'''
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=data_format )
    def preprocess ( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 92 | 1 |
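The `pad` method rounds each spatial dimension up to the next multiple of `size` (note that `(old // size + 1) * size` adds a full extra block even when the dimension is already a multiple). A quick standalone check of that arithmetic with numpy's symmetric padding:

```python
import numpy as np

size = 8
image = np.zeros((2, 13, 21))  # (channels, height, width)
pad_h = (image.shape[1] // size + 1) * size - image.shape[1]  # 13 -> pad 3
pad_w = (image.shape[2] // size + 1) * size - image.shape[2]  # 21 -> pad 3
padded = np.pad(image, ((0, 0), (0, pad_h), (0, pad_w)), mode="symmetric")
print(padded.shape)  # (2, 16, 24)
```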
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
    def __init__( self , parent , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs ( self ):
        '''simple docstring'''
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed ( self ):
        '''simple docstring'''
        random.seed(self.seed )
        tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model ( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        '''simple docstring'''
        model = TFTransfoXLModel(config )
        hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
        hidden_states_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head ( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        '''simple docstring'''
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
        _ , mems_1 = model(inputs ).to_tuple()
        lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''mems''': mems_1, '''labels''': lm_labels}
        _ , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification ( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        '''simple docstring'''
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common ( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids_1 , input_ids_2 , lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': TFTransfoXLModel,
            'text-classification': TFTransfoXLForSequenceClassification,
            'text-generation': TFTransfoXLLMHeadModel,
            'zero-shot': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def setUp ( self ):
        '''simple docstring'''
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
    def test_config ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_transfo_xl_model ( self ):
        '''simple docstring'''
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head ( self ):
        '''simple docstring'''
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes ( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode ( self ):
        '''simple docstring'''
        # TODO JP: Make TransfoXL XLA compliant
        pass
    @slow
    def test_model_from_pretrained ( self ):
        '''simple docstring'''
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
    def test_dataset_conversion ( self ):
        '''simple docstring'''
        pass
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
| 92 |
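The shape assertions in the tester above hinge on Transformer-XL's segment-level recurrence: each forward pass returns `mems`, a cache of the last `mem_len` hidden states per layer, which is fed back in with the next segment. A deliberately simplified pure-Python sketch of that cache update (names and list-of-strings stand-ins are illustrative; the real model caches per-layer tensors):

```python
def update_mems(prev_mems: list, new_hidden: list, mem_len: int) -> list:
    """Append this segment's hidden states and keep only the trailing `mem_len` steps."""
    combined = prev_mems + new_hidden
    return combined[-mem_len:]

mems: list = []
for segment in (["h1", "h2"], ["h3", "h4"], ["h5", "h6"]):
    mems = update_mems(mems, segment, mem_len=4)
print(mems)  # ['h3', 'h4', 'h5', 'h6'] — the four most recent states survive
```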
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mbart"""] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mbart"""] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mbart"""] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
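The `_LazyModule` pattern above defers heavy imports (torch, tensorflow, flax backends) until an attribute is first touched. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562), with a made-up submodule layout rather than the transformers implementation:

```python
# lazy_pkg/__init__.py — illustrative package, not the real transformers code
import importlib

_import_structure = {"config": ["MyConfig"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    # e.g. accessing `lazy_pkg.MyModel` imports lazy_pkg.modeling only on demand
    return getattr(module, name)
```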
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : "Conversation" ):
'''simple docstring'''
lowercase : Any =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [self.eos_token_id] )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowercase : Any =input_ids[-self.model_max_length :]
return input_ids
| 92 |
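The conversation helper at the end of the tokenizer reduces to: concatenate every turn, append `eos` after each, and keep only the trailing `model_max_length` ids. That logic in isolation (function and parameter names are illustrative):

```python
def build_conversation_ids(turns: list[list[int]], eos_id: int, max_len: int) -> list[int]:
    input_ids: list[int] = []
    for ids in turns:
        input_ids.extend(ids + [eos_id])  # one eos terminates each turn
    return input_ids[-max_len:]           # keep only the most recent context

print(build_conversation_ids([[1, 2], [3, 4, 5]], eos_id=0, max_len=4))  # [3, 4, 5, 0]
```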
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
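Notice the ByT5 module ships only a tokenizer: ByT5 needs no vocabulary file because its tokens are raw UTF-8 bytes, offset past a handful of special ids (pad=0, eos=1, unk=2 in the released models). A sketch of that encoding, under the assumption of a +3 offset:

```python
def byt5_like_encode(text: str, offset: int = 3) -> list[int]:
    """Map each UTF-8 byte to byte_value + offset; ids 0..2 stay reserved for specials."""
    return [b + offset for b in text.encode("utf-8")]

print(byt5_like_encode("hi"))  # [107, 108] — 'h' is byte 104, 'i' is byte 105
```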
'''simple docstring'''
from __future__ import annotations
def all_construct ( target : str , word_bank : list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size : int = len(target ) + 1
    table : list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations : list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 92 |
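`all_construct` materializes every decomposition, which can grow exponentially with the target length; when only the number of decompositions is needed, the same table can store counts instead of lists:

```python
def count_construct(target: str, word_bank: list[str]) -> int:
    table = [0] * (len(target) + 1)
    table[0] = 1  # one way to build the empty prefix
    for i in range(len(target) + 1):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] += table[i]
    return table[len(target)]

print(count_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))  # 2
```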
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy ( preds , labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments :
    task_name : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
    return results
def _mp_fn ( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 92 | 1 |
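The whole metric pipeline in that script reduces to an argmax over the per-choice logits followed by `simple_accuracy`'s mean over exact matches. In isolation, with dummy data:

```python
import numpy as np

logits = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])  # (examples, num_choices)
labels = np.array([1, 2])
preds = np.argmax(logits, axis=1)   # -> [1, 0]
print((preds == labels).mean())     # 0.5: one of two examples correct
```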
'''simple docstring'''
def twos_complement ( number : int ) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
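A quick sanity check of the converter above: the n-bit two's complement of a negative x equals 2**n + x, where n is the magnitude's bit length plus one sign bit. (This assumes the function is in scope under the name `twos_complement`, as written above.)

```python
# -5 needs 3 magnitude bits, so the result is 4 bits wide: 0b1011 == 11 == 2**4 - 5
assert twos_complement(-5) == "0b1011"
for x in range(-1, -33, -1):
    bits = len(bin(-x)) - 2 + 1  # magnitude bits + 1 sign bit
    assert int(twos_complement(x), 2) == (1 << bits) + x
```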
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text ( text : str , n : int = 100 , character : str = " " ) -> List[str]:
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents ( documents : dict ) -> dict:
    titles , texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text'''] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed ( documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast ) -> dict:
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase : Tuple =load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ )
lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase : Optional[int] =Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
lowercase : Optional[Any] =dataset.map(
partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , batch_size=processing_args.batch_size , features=__magic_name__ , )
# And finally save your dataset
lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__magic_name__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ )
# And save the index
lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__magic_name__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments :
    csv_path : str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question : Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name : str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name : str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir : Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments :
    num_proc : Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size : int = field(
        default=16 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class IndexHnswArguments :
    d : int = field(
        default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m : int = field(
        default=128 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 92 | 1 |
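Once built, the HNSW index is queried with inner-product scores against DPR question embeddings. A standalone sketch of the same index type over random vectors (`d` and `m` mirror the script's defaults; a real query would be a DPR question embedding):

```python
import numpy as np
import faiss

d, m = 768, 128  # embedding dimension, HNSW links per node
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
passages = np.random.rand(1000, d).astype("float32")
index.add(passages)
query = passages[:1]
scores, ids = index.search(query, 5)  # top-5 approximate inner-product neighbors
print(ids[0])
```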
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp ( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self , **kwargs ):
        '''simple docstring'''
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ):
        '''simple docstring'''
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id ( self ):
        '''simple docstring'''
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_tokenizer()
lowercase : str =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , )
lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , '''This is a test''' )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# fmt: off
lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = 'facebook/m2m100_418M'
lowerCamelCase_ = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase_ = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
lowercase : Optional[int] =1
return cls
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] ='''en'''
lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =tempfile.mkdtemp()
lowercase : Tuple =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] ='''en'''
lowercase : int ='''fr'''
lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
lowercase : str =shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase : int =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase : Union[str, Any] ='''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int ='''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase : Optional[Any] ='''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
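# A minimal translation sketch of the public API exercised by the tests above,
# using the same checkpoint; this assumes network access to download weights.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
encoded = m2m_tokenizer("NSA Affair Emphasizes Complete Lack of Debate on Intelligence", return_tensors="pt")
# force the decoder to start with the French language token, as in the tests
generated = m2m_model.generate(**encoded, forced_bos_token_id=m2m_tokenizer.get_lang_id("fr"))
print(m2m_tokenizer.batch_decode(generated, skip_special_tokens=True))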
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
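# A stripped-down sketch of the lazy-import idea behind the `_LazyModule` used
# above: attribute access triggers the real submodule import. Illustrative only;
# the real implementation handles more (module spec, extra objects, error messages),
# and the init file wraps it with TYPE_CHECKING and optional-dependency guards.
import importlib
import types

class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        mod_name = self._attr_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + mod_name, self.__name__)
        return getattr(submodule, attr)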
| 92 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int:
try:
lowercase : Any =int(__magic_name__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =2
lowercase : Dict =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase : Union[str, Any] =i
while n % i == 0:
lowercase : Optional[int] =n // i
i += 1
return int(__magic_name__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
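# The same trial-division idea with readable names, plus sanity checks on known
# values; a sketch for reference, not the snippet's exact control flow.
def largest_prime_factor(n: int) -> int:
    i, ans = 2, 1
    while n > 1:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    return ans

assert largest_prime_factor(13195) == 29            # 13195 = 5 * 7 * 13 * 29
assert largest_prime_factor(600851475143) == 6857   # Project Euler #3 answer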
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> list[list[int]]:
lowercase : list[list[int]] =[]
create_all_state(1 , __magic_name__ , __magic_name__ , [] , __magic_name__ )
return result
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : list[list[int]] , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__magic_name__ , total_number - level + 2 ):
current_list.append(__magic_name__ )
create_all_state(i + 1 , __magic_name__ , level - 1 , __magic_name__ , __magic_name__ )
current_list.pop()
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> None:
for i in total_list:
print(*__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = 4
UpperCamelCase_ = 2
UpperCamelCase_ = generate_all_combinations(n, k)
print_all_state(total_list)
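# Cross-check: the backtracking above enumerates k-combinations of 1..n in
# lexicographic order, which itertools reproduces directly for the same n=4, k=2.
from itertools import combinations

expected = [list(c) for c in combinations(range(1, 4 + 1), 2)]
print(expected)  # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]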
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'speech_to_text_2'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : List[str] =vocab_size
lowercase : Optional[int] =d_model
lowercase : Optional[Any] =decoder_ffn_dim
lowercase : Any =decoder_layers
lowercase : Dict =decoder_attention_heads
lowercase : List[Any] =dropout
lowercase : List[Any] =attention_dropout
lowercase : Any =activation_dropout
lowercase : Optional[Any] =activation_function
lowercase : Optional[int] =init_std
lowercase : Dict =decoder_layerdrop
lowercase : Optional[int] =use_cache
lowercase : Optional[Any] =decoder_layers
lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase : str =max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
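# A minimal usage sketch, assuming a transformers release that still ships the
# Speech2Text2 decoder; note how the attribute map above aliases the names.
from transformers import Speech2Text2Config

s2t2_config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
print(s2t2_config.hidden_size)           # 256, aliased to d_model
print(s2t2_config.num_attention_heads)   # 4, aliased to decoder_attention_heads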
| 92 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'fnet'
def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=32000 , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : List[str]=3072 , UpperCAmelCase__ : Union[str, Any]="gelu_new" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Optional[Any]=2 , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Optional[Any] =vocab_size
lowercase : Optional[int] =max_position_embeddings
lowercase : Optional[Any] =hidden_size
lowercase : Any =num_hidden_layers
lowercase : int =intermediate_size
lowercase : Any =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =initializer_range
lowercase : Dict =type_vocab_size
lowercase : str =layer_norm_eps
lowercase : Optional[int] =use_tpu_fourier_optimizations
lowercase : str =tpu_short_seq_length
| 92 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : List[Any] =parent
lowercase : Tuple =batch_size
lowercase : List[str] =image_size
lowercase : List[Any] =num_channels
lowercase : Union[str, Any] =num_stages
lowercase : int =hidden_sizes
lowercase : Any =depths
lowercase : Tuple =is_training
lowercase : str =use_labels
lowercase : List[Any] =intermediate_size
lowercase : int =hidden_act
lowercase : Union[str, Any] =num_labels
lowercase : Optional[int] =initializer_range
lowercase : int =out_features
lowercase : List[str] =out_indices
lowercase : str =scope
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Dict =None
if self.use_labels:
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Dict =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[int] =model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase : Optional[Any] =None
lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : List[str] =config_and_inputs
lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaModelTester(self )
lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : Optional[int] =True
if model_class.__name__ in [
*get_values(UpperCAmelCase__ ),
*get_values(UpperCAmelCase__ ),
]:
continue
lowercase : Dict =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : List[Any] =model(**UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : List[Any] =False
lowercase : Any =True
if (
model_class.__name__
in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase : Any =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : int =model(**UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict =model_class(UpperCAmelCase__ )
lowercase : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : int =[*signature.parameters.keys()]
lowercase : Optional[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
lowercase : int =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : List[Any] =self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[str] =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Tuple =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ )
lowercase : int =self.default_image_processor
lowercase : List[str] =prepare_img()
lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase : Dict =model(**UpperCAmelCase__ )
# verify the logits
lowercase : Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 92 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]:
lowercase : Dict =SwinConfig(image_size=192 )
if "base" in model_name:
lowercase : Any =6
lowercase : Dict =128
lowercase : List[Any] =(2, 2, 18, 2)
lowercase : Optional[int] =(4, 8, 16, 32)
elif "large" in model_name:
lowercase : int =12
lowercase : str =192
lowercase : int =(2, 2, 18, 2)
lowercase : Optional[int] =(6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowercase : List[str] =window_size
lowercase : Dict =embed_dim
lowercase : Optional[int] =depths
lowercase : Union[str, Any] =num_heads
return config
def _lowerCAmelCase ( __magic_name__ : int ) -> Dict:
if "encoder.mask_token" in name:
lowercase : Any =name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
lowercase : Dict =name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
lowercase : Dict =name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
lowercase : Any =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase : List[str] =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase : Optional[Any] =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : Dict =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : Optional[Any] =name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowercase : List[str] ='''layernorm.weight'''
if name == "encoder.norm.bias":
lowercase : List[str] ='''layernorm.bias'''
if "decoder" in name:
pass
else:
lowercase : Any ='''swin.''' + name
return name
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : Any ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase : str =orig_state_dict.pop(__magic_name__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowercase : List[str] =key.split('''.''' )
lowercase : Any =int(key_split[2] )
lowercase : List[str] =int(key_split[4] )
lowercase : int =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase : Dict =val[:dim, :]
lowercase : Optional[int] =val[
dim : dim * 2, :
]
lowercase : str =val[-dim:, :]
else:
lowercase : Any =val[
:dim
]
lowercase : Optional[int] =val[
dim : dim * 2
]
lowercase : Any =val[
-dim:
]
else:
lowercase : Any =val
return orig_state_dict
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int ) -> Tuple:
lowercase : Tuple =torch.load(__magic_name__ , map_location='''cpu''' )['''model''']
lowercase : Union[str, Any] =get_swin_config(__magic_name__ )
lowercase : int =SwinForMaskedImageModeling(__magic_name__ )
model.eval()
lowercase : List[Any] =convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
lowercase : List[Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[Any] =ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
lowercase : List[str] =Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : str =image_processor(images=__magic_name__ , return_tensors='''pt''' )
with torch.no_grad():
lowercase : List[str] =model(**__magic_name__ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
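# The qkv split performed by the conversion above, in isolation: a fused
# attention projection of shape (3*dim, dim) is sliced into query, key and
# value blocks of shape (dim, dim) each (biases are split the same way in 1D).
import numpy as np

dim = 4
qkv_weight = np.arange(3 * dim * dim).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)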
| 92 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ = object()
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]:
lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ):
lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )]
if matches and all(__magic_name__ ):
return True
return False
def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]:
def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ):
for rule, replacement in rules:
if _match(__magic_name__ , __magic_name__ ):
return replacement
return val
return replace
def _lowerCAmelCase ( ) -> int:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )),
(("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _lowerCAmelCase ( __magic_name__ : str ) -> int:
lowercase : int =_get_partition_rules()
lowercase : Tuple =_replacement_rules(__magic_name__ )
lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )}
lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__magic_name__ ) )
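# A readable restatement of the regex-window matcher above (named `_match` at
# its call site), with a sanity check on a GPT-style flattened parameter key.
import re

def match_rule(qs, ks):
    # does the regex tuple `qs` match any contiguous window of the key tuple `ks`?
    pats = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        if all(p.match(k) for p, k in zip(pats, ks[i:])):
            return True
    return False

key = ("transformer", "h", "0", "attention", "q_proj", "kernel")
assert match_rule(("attention", "(q_proj|k_proj|v_proj)", "kernel"), key)
assert not match_rule(("mlp", "c_fc", "kernel"), key)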
| 92 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : List[Any]=False ) -> int:
try:
lowercase : Union[str, Any] =os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase : Tuple =default
else:
# KEY is set, convert it to True or False.
try:
lowercase : List[str] =strtobool(__magic_name__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCamelCase_ = parse_flag_from_env("""RUN_SLOW""", default=False)
def _lowerCAmelCase ( __magic_name__ : str ) -> Any:
return unittest.skip('''Test was skipped''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Optional[Any]:
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> int:
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Optional[Any]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> str:
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> int:
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Any:
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Optional[Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Any:
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Union[str, Any]:
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Optional[int]:
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> int:
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[str]=None , __magic_name__ : str=None ) -> Any:
if test_case is None:
return partial(__magic_name__ , version=__magic_name__ )
return unittest.skipUnless(is_torch_version('''>=''' , __magic_name__ ) , f'''test requires torch version >= {version}''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Union[str, Any]:
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Dict:
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : str ) -> Tuple:
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__magic_name__ )
UpperCamelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _lowerCAmelCase ( __magic_name__ : Any ) -> List[str]:
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__magic_name__ )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = True
@classmethod
def lowerCamelCase_ ( cls : Optional[int] ):
'''simple docstring'''
lowercase : str =tempfile.mkdtemp()
@classmethod
def lowerCamelCase_ ( cls : Tuple ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCAmelCase__ )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[mock.Mock, List[mock.Mock]] ):
'''simple docstring'''
lowercase : Optional[Any] =mocks if isinstance(UpperCAmelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Dict:
lowercase : List[Any] =AcceleratorState()
lowercase : Optional[Any] =tensor[None].clone().to(state.device )
lowercase : int =gather(__magic_name__ ).cpu()
lowercase : Tuple =tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __magic_name__ ):
return False
return True
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Any =returncode
lowercase : Optional[Any] =stdout
lowercase : Union[str, Any] =stderr
async def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple ) -> Optional[Any]:
while True:
lowercase : Dict =await stream.readline()
if line:
callback(__magic_name__ )
else:
break
async def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[str]=False , __magic_name__ : List[Any]=False ) -> _RunOutput:
if echo:
print('''\nRunning: ''' , ''' '''.join(__magic_name__ ) )
lowercase : Any =await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__magic_name__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__magic_name__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no data
# will be seen until it's done, and if it hangs, for example, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase : Optional[Any] =[]
lowercase : Dict =[]
def tee(__magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any="" ):
lowercase : Dict =line.decode('''utf-8''' ).rstrip()
sink.append(__magic_name__ )
if not quiet:
print(__magic_name__ , __magic_name__ , file=__magic_name__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__magic_name__ , )
return _RunOutput(await p.wait() , __magic_name__ , __magic_name__ )
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Optional[int]=None , __magic_name__ : str=None , __magic_name__ : str=180 , __magic_name__ : Optional[int]=False , __magic_name__ : Any=True ) -> _RunOutput:
lowercase : Optional[int] =asyncio.get_event_loop()
lowercase : Any =loop.run_until_complete(
_stream_subprocess(__magic_name__ , env=__magic_name__ , stdin=__magic_name__ , timeout=__magic_name__ , quiet=__magic_name__ , echo=__magic_name__ ) )
lowercase : Union[str, Any] =''' '''.join(__magic_name__ )
if result.returncode > 0:
lowercase : int ='''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
pass
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : List[str]=False ) -> List[str]:
try:
lowercase : int =subprocess.check_output(__magic_name__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__magic_name__ , '''decode''' ):
lowercase : List[str] =output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(__magic_name__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
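# How the RUN_SLOW flag parsed above gates a test in practice; the test body is
# a placeholder, and `parse_flag_from_env` is the name used at the call site above.
import unittest

class ExampleSuite(unittest.TestCase):
    @unittest.skipUnless(parse_flag_from_env("RUN_SLOW", default=False), "test is slow")
    def test_full_training_run(self):
        self.assertTrue(True)  # stand-in for an expensive end-to-end check

# RUN_SLOW=yes python -m pytest -k full_training_run   -> test runs
# python -m pytest -k full_training_run                -> test is skipped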
| 92 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
lowercase : Optional[Any] =1
lowercase : Union[str, Any] =True
for v in tree[start]:
if v not in visited:
ret += dfs(__magic_name__ )
if ret % 2 == 0:
cuts.append(__magic_name__ )
return ret
def _lowerCAmelCase ( ) -> int:
dfs(1 )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 10, 9
UpperCamelCase_ = defaultdict(list)
UpperCamelCase_ = {}
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
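# Context: this is the classic "Even Tree" problem -- cut as many edges as
# possible so every remaining component has an even number of nodes. An edge can
# be cut exactly when the subtree below it has even size; the root's own even
# size is also collected, hence the final `len(cuts) - 1`. A standalone
# restatement over the same 10-node tree:
from collections import defaultdict

even_edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
adjacency = defaultdict(list)
for a, b in even_edges:
    adjacency[a].append(b)
    adjacency[b].append(a)

def subtree_size(node, parent, sizes):
    total = 1
    for nxt in adjacency[node]:
        if nxt != parent:
            total += subtree_size(nxt, node, sizes)
    sizes[node] = total
    return total

all_sizes = {}
subtree_size(1, 0, all_sizes)
removable = sum(1 for node, s in all_sizes.items() if node != 1 and s % 2 == 0)
print(removable)  # 2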
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict:
lowercase : List[str] =R'''\w+[.]\d+'''
lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ )
for pat in pats:
lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) )
return key
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str:
lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase : str =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase : str =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase : str =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowercase : Optional[Any] =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]:
# Step 1: Convert pytorch tensor to numpy
lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) )
lowercase : Dict =flatten_dict(__magic_name__ )
lowercase : Dict ={}
# Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase : Dict =rename_key(__magic_name__ )
lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add the unexpected weight so that a warning is thrown
lowercase : Tuple =jnp.asarray(__magic_name__ )
return unflatten_dict(__magic_name__ )
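# The two layout conventions reconciled above, shown with plain numpy: PyTorch
# stores Linear weights as (out, in) and Conv2d weights as (O, I, kH, kW),
# while Flax expects kernels of shape (in, out) and (kH, kW, I, O).
import numpy as np

linear_pt = np.zeros((128, 64))             # (out_features, in_features)
assert linear_pt.T.shape == (64, 128)       # -> Flax (in, out)

conv_pt = np.zeros((16, 3, 3, 3))           # (O, I, kH, kW)
assert conv_pt.transpose(2, 3, 1, 0).shape == (3, 3, 3, 16)  # -> Flax (kH, kW, I, O)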
| 92 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCamelCase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
lowerCamelCase_ = 1_00_00
lowerCamelCase_ = None
lowerCamelCase_ = None
class __SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
lowerCamelCase_ = ParquetConfig
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowercase : List[str] =dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase__ , (str, list, tuple) ):
lowercase : Dict =data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Tuple =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase : Optional[int] =[dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase : int =[]
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : List[Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase : Optional[int] =[dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCAmelCase__ ):
with open(UpperCAmelCase__ , '''rb''' ) as f:
lowercase : Any =datasets.Features.from_arrow_schema(pq.read_schema(UpperCAmelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files} ) )
return splits
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : pa.Table ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase : Dict =table_cast(UpperCAmelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ):
with open(UpperCAmelCase__ , '''rb''' ) as f:
lowercase : Dict =pq.ParquetFile(UpperCAmelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowercase : int =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(UpperCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCAmelCase__ )}: {e}''' )
raise
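# The core read loop the builder above relies on, standalone; the file name is
# illustrative, and a toy table is written first so the sketch is runnable.
import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({"text": ["a", "b", "c"], "label": [0, 1, 0]}), "toy.parquet")
parquet_file = pq.ParquetFile("toy.parquet")
for record_batch in parquet_file.iter_batches(batch_size=2, columns=["text"]):
    batch_table = pa.Table.from_batches([record_batch])
    print(batch_table.num_rows, batch_table.column_names)  # 2 ['text'] then 1 ['text']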
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This helps guarantee that every gene can eventually appear during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]:
lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]:
lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 )
lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:]
lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str:
lowercase : Union[str, Any] =list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase : Dict =random.choice(__magic_name__ )
return "".join(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]:
lowercase : Any =[]
# Generate more children proportionally to the fitness score.
lowercase : Dict =int(parent_a[1] * 100 ) + 1
lowercase : List[str] =10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0]
lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside genes variable.
lowercase : Optional[int] =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__magic_name__ )
# Generate random starting population.
lowercase : int =[]
for _ in range(__magic_name__ ):
population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
    # Just some logs to know what the algorithm is doing.
lowercase , lowercase : Optional[int] =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
lowercase : Any =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
lowercase : Dict =[
(item, score / len(__magic_name__ )) for item, score in population_score
]
        # This is the selection step.
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase_ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCamelCase_ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
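
# A de-obfuscated sketch of the crossover and mutation steps above; the renamed
# version collapses both parents into one name, so the two-parent intent is
# restated here with illustrative names.
import random

MUTATION_PROBABILITY = 0.4

def crossover(parent_1, parent_2):
    # Cut both parents at the same random point and swap the tails.
    cut = random.randint(0, len(parent_1) - 1)
    return parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:]

def mutate(child, genes):
    # With probability MUTATION_PROBABILITY, replace one random position by a random gene.
    chars = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        chars[random.randint(0, len(chars) - 1)] = random.choice(genes)
    return "".join(chars)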
| 92 | 1 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =''''''
lowercase : Tuple =''''''
lowercase : Tuple =[]
lowercase : int =0
lowercase : Union[str, Any] =256
lowercase : Optional[int] =0
lowercase : int =0
lowercase : List[str] =0
lowercase : Dict =0
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =cva.imread(UpperCAmelCase__ , 0 )
lowercase : List[Any] =copy.deepcopy(self.img )
lowercase , lowercase , lowercase : Union[str, Any] =plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
lowercase : Optional[Any] =np.sum(UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) ):
lowercase : List[str] =x[i] / self.k
self.sk += prk
lowercase : List[Any] =(self.L - 1) * self.sk
            # keep only the fractional part of `last` so the next line rounds it correctly
            lowercase : Optional[Any] =last % 1
lowercase : str =int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(UpperCAmelCase__ )
lowercase : Optional[int] =int(np.ma.count(self.img ) / self.img[1].size )
lowercase : Optional[int] =self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowercase : Optional[Any] =self.img[j][i]
if num != self.last_list[num]:
lowercase : List[str] =self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
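
# The class above follows the classic cumulative-histogram recipe. A compact
# numpy-only sketch of the same idea (assumes a uint8 grayscale image): build a
# lookup table from the cumulative distribution and remap every pixel through it.
import numpy as np

def equalize(img, levels=256):
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / img.size                       # cumulative distribution in [0, 1]
    lut = np.round((levels - 1) * cdf).astype(np.uint8)  # pixel-value lookup table
    return lut[img]                                      # remap each pixel via the table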
| 92 |
'''simple docstring'''
import datasets
UpperCamelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCamelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCamelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
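
# The metric above reduces to plain accuracy over integer labels. A minimal
# numpy sketch:
import numpy as np

def accuracy(preds, labels):
    preds, labels = np.asarray(preds), np.asarray(labels)
    return float((preds == labels).mean())

assert accuracy([0, 1, 1], [0, 1, 0]) == 2 / 3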
| 92 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : Dict =0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
lowercase : Any =''''''
lowercase : Optional[int] =''''''
    # append each character + "|" to new_input_string for range(0, length-1)
for i in input_string[: len(__magic_name__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
lowercase , lowercase : Tuple =0, 0
    # length[i] shows the length of the palindromic substring with center i
lowercase : Any =[1 for i in range(len(__magic_name__ ) )]
    # for each character in new_input_string, find the corresponding palindromic string
lowercase : Dict =0
for j in range(len(__magic_name__ ) ):
lowercase : Optional[int] =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__magic_name__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase : Optional[Any] =2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
lowercase : Optional[Any] =j - k + 1 # noqa: E741
lowercase : Tuple =j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase : int =length[j]
lowercase : Optional[Any] =j
# create that string
lowercase : Dict =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
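
# A brute-force O(n^3) reference for cross-checking the linear-time routine
# above on small inputs (an illustrative helper, not part of the original file):
def longest_palindrome_naive(s):
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

assert longest_palindrome_naive("cbbd") == "bb"   # padded form: "c|b|b|d"
assert longest_palindrome_naive("abax") == "aba"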
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : Optional[int] =13
lowercase : Union[str, Any] =7
lowercase : str =30
lowercase : Optional[int] =self.seq_length + self.mem_len
lowercase : Dict =15
lowercase : List[str] =True
lowercase : Optional[int] =True
lowercase : Tuple =99
lowercase : str =[10, 50, 80]
lowercase : List[Any] =32
lowercase : Optional[int] =32
lowercase : int =4
lowercase : Any =8
lowercase : List[Any] =128
lowercase : List[str] =2
lowercase : Tuple =2
lowercase : int =None
lowercase : Optional[int] =1
lowercase : int =0
lowercase : List[str] =3
lowercase : str =self.vocab_size - 1
lowercase : Tuple =0.01
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_labels:
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ )
lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple()
lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ )
lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple()
lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple()
lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple()
lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs
lowercase : Union[str, Any] ={'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase_ = () if is_tf_available() else ()
lowerCamelCase_ = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =TFTransfoXLModelTester(self )
lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : int =[TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase : str =model_class(UpperCAmelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase : Union[str, Any] =model.get_output_embeddings()
assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer )
lowercase : Any =model.get_bias()
assert name is None
else:
lowercase : Optional[int] =model.get_output_embeddings()
assert x is None
lowercase : Optional[int] =model.get_bias()
assert name is None
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
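
# Every shape assertion above revolves around one mechanism: Transformer-XL
# returns per-layer memories ("mems") of shape (mem_len, batch, hidden) that
# are fed back on the next segment. A toy numpy illustration of that sliding
# memory (names and sizes are illustrative):
import numpy as np

def update_mems(mems, hidden, mem_len):
    # mems/hidden: (seq, batch, hidden); keep only the newest mem_len steps.
    return np.concatenate([mems, hidden], axis=0)[-mem_len:]

mems = np.zeros((0, 2, 8))                  # empty memory before the first segment
for _ in range(3):                          # three consecutive segments
    hidden = np.random.randn(5, 2, 8)       # hidden states of one segment (one layer)
    mems = update_mems(mems, hidden, mem_len=7)
assert mems.shape == (7, 2, 8)              # matches the shape checks above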
| 92 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Optional[Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowercase : Union[str, Any] =EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
lowercase : Optional[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
lowercase : int =CLIPTextModel(UpperCAmelCase__ )
lowercase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase__ )
lowercase : List[Any] =CLIPTextModelWithProjection(UpperCAmelCase__ )
lowercase : List[str] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase__ )
lowercase : List[str] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase : Dict =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : Optional[int] =image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : str =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Optional[int] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Union[str, Any] ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Dict =StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
lowercase : List[str] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Any =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : List[Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Dict =np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Optional[Any] =StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
lowercase : int =sd_pipe.to(UpperCAmelCase__ )
lowercase : Optional[int] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
# forward without prompt embeds
lowercase : List[str] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Optional[int] =3 * ['''this is a negative prompt''']
lowercase : Any =negative_prompt
lowercase : Optional[int] =3 * [inputs['''prompt''']]
lowercase : Optional[Any] =sd_pipe(**UpperCAmelCase__ )
lowercase : List[Any] =output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : List[Any] =3 * ['''this is a negative prompt''']
lowercase : Dict =3 * [inputs.pop('''prompt''' )]
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Any =sd_pipe.encode_prompt(UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
lowercase : List[str] =sd_pipe(
**UpperCAmelCase__ , prompt_embeds=UpperCAmelCase__ , negative_prompt_embeds=UpperCAmelCase__ , pooled_prompt_embeds=UpperCAmelCase__ , negative_pooled_prompt_embeds=UpperCAmelCase__ , )
lowercase : List[str] =output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]="cpu" , UpperCAmelCase__ : Tuple=torch.floataa , UpperCAmelCase__ : Union[str, Any]=0 ):
'''simple docstring'''
lowercase : str =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : List[str] =np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
lowercase : int =torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
lowercase : Optional[Any] ={
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[int] =DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : str =self.get_inputs(UpperCAmelCase__ )
lowercase : List[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : int =np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
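
# The get_inputs helper above pins randomness down by drawing latents from a
# seeded numpy generator before handing them to torch, which keeps the test
# reproducible across devices. A minimal sketch of that trick:
import numpy as np
import torch

def seeded_latents(seed, shape=(1, 4, 64, 64), dtype=torch.float32, device="cpu"):
    latents = np.random.RandomState(seed).standard_normal(shape)
    return torch.from_numpy(latents).to(device=device, dtype=dtype)

assert torch.equal(seeded_latents(0), seeded_latents(0))  # same seed, same latents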
| 92 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ):
'''simple docstring'''
lowercase : str =parent
lowercase : int =batch_size
lowercase : Any =seq_length
lowercase : int =is_training
lowercase : str =use_input_mask
lowercase : int =use_token_type_ids
lowercase : Dict =use_labels
lowercase : int =vocab_size
lowercase : str =embedding_size
lowercase : Union[str, Any] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_hidden_groups
lowercase : Union[str, Any] =num_attention_heads
lowercase : Any =intermediate_size
lowercase : Tuple =hidden_act
lowercase : Optional[int] =hidden_dropout_prob
lowercase : Union[str, Any] =attention_probs_dropout_prob
lowercase : List[Any] =max_position_embeddings
lowercase : int =type_vocab_size
lowercase : int =type_sequence_label_size
lowercase : Any =initializer_range
lowercase : List[Any] =num_labels
lowercase : int =num_choices
lowercase : Optional[int] =scope
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[int] =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Dict =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Tuple =None
lowercase : Any =None
lowercase : Dict =None
if self.use_labels:
lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Any =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : int =AlbertModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Optional[int] =self.num_choices
lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Union[str, Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Dict =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ):
'''simple docstring'''
lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowercase : Any =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ )
lowercase : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =AlbertModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Tuple =type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' )
lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
lowercase : int =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowercase : Union[str, Any] =torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
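
# The multiple-choice test above hinges on one tensor trick: tile each input
# along a new "choices" axis with unsqueeze/expand before feeding the model.
# A standalone sketch of that reshape:
import torch

def tile_for_choices(input_ids, num_choices):
    # (batch, seq) -> (batch, num_choices, seq), one copy per answer choice
    return input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()

ids = torch.arange(6).reshape(2, 3)              # batch=2, seq_len=3
assert tile_for_choices(ids, 4).shape == (2, 4, 3)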
| 92 | 1 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _lowerCAmelCase ( __magic_name__ : List[DatasetType] , __magic_name__ : Optional[List[float]] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[DatasetInfo] = None , __magic_name__ : Optional[NamedSplit] = None , __magic_name__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(__magic_name__ ):
if not isinstance(__magic_name__ , (Dataset, IterableDataset) ):
if isinstance(__magic_name__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(__magic_name__ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__magic_name__ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__magic_name__ ).__name__}.''' )
if i == 0:
lowercase , lowercase : str =(
(Dataset, IterableDataset) if isinstance(__magic_name__ , __magic_name__ ) else (IterableDataset, Dataset)
)
elif not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__magic_name__ , __magic_name__ , __magic_name__ , info=__magic_name__ , split=__magic_name__ , stopping_strategy=__magic_name__ )
else:
return _interleave_iterable_datasets(
__magic_name__ , __magic_name__ , __magic_name__ , info=__magic_name__ , split=__magic_name__ , stopping_strategy=__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[DatasetType] , __magic_name__ : Optional[DatasetInfo] = None , __magic_name__ : Optional[NamedSplit] = None , __magic_name__ : int = 0 , ) -> DatasetType:
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(__magic_name__ ):
if not isinstance(__magic_name__ , (Dataset, IterableDataset) ):
if isinstance(__magic_name__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(__magic_name__ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__magic_name__ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__magic_name__ ).__name__}.''' )
if i == 0:
lowercase , lowercase : Optional[int] =(
(Dataset, IterableDataset) if isinstance(__magic_name__ , __magic_name__ ) else (IterableDataset, Dataset)
)
elif not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__magic_name__ , info=__magic_name__ , split=__magic_name__ , axis=__magic_name__ )
else:
return _concatenate_iterable_datasets(__magic_name__ , info=__magic_name__ , split=__magic_name__ , axis=__magic_name__ )
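
# These helpers back the public entry points datasets.interleave_datasets and
# datasets.concatenate_datasets. A hedged usage sketch (toy data; with no
# probabilities the mixing is round-robin):
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
print(mixed["x"])  # rows alternate between d1 and d2: [0, 10, 1, 11, 2, 12]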
| 92 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
'''simple docstring'''
        if dst_width <= 0 or dst_height <= 0:
raise ValueError('''Destination width/height should be > 0''' )
lowercase : Union[str, Any] =img
lowercase : Union[str, Any] =img.shape[1]
lowercase : str =img.shape[0]
lowercase : Union[str, Any] =dst_width
lowercase : str =dst_height
lowercase : str =self.src_w / self.dst_w
lowercase : Optional[Any] =self.src_h / self.dst_h
lowercase : int =(
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )]
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
return int(self.ratio_x * x )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
return int(self.ratio_y * y )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 800, 600
UpperCamelCase_ = imread("""image_data/lena.jpg""", 1)
UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
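
# The per-pixel double loop above can be collapsed into plain numpy indexing:
# precompute the source row/column for every destination pixel, then apply one
# fancy-indexing step. Same nearest-neighbour mapping, no Python loop.
import numpy as np

def resize_nearest(img, dst_w, dst_h):
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # source column per output column
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # source row per output row
    return img[ys[:, None], xs[None, :]]

out = resize_nearest(np.zeros((600, 800, 3), np.uint8), dst_w=400, dst_h=300)
assert out.shape == (300, 400, 3)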
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int ) -> list[list[int]]:
lowercase : list[list[int]] =[]
lowercase : list[int] =[]
lowercase : Any =0
lowercase : str =sum(__magic_name__ )
create_state_space_tree(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
return result
def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : list[list[int]] , __magic_name__ : int , ) -> None:
if sum(__magic_name__ ) > max_sum or (remaining_nums_sum + sum(__magic_name__ )) < max_sum:
return
if sum(__magic_name__ ) == max_sum:
result.append(__magic_name__ )
return
for index in range(__magic_name__ , len(__magic_name__ ) ):
create_state_space_tree(
__magic_name__ , __magic_name__ , index + 1 , [*path, nums[index]] , __magic_name__ , remaining_nums_sum - nums[index] , )
UpperCamelCase_ = [3, 34, 4, 12, 5, 2]
UpperCamelCase_ = 9
UpperCamelCase_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
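
# The early return in the tree builder above prunes two dead branches: the
# partial sum has already overshot max_sum, or even adding every remaining
# number cannot reach it. Worked through the driver inputs above:
#   subsets of [3, 34, 4, 12, 5, 2] summing to 9  ->  [3, 4, 2] and [4, 5]
# which is exactly what the final print emits.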
| 92 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Any =0.0_0
lowercase : Tuple =0
for resistor in resistors:
if resistor <= 0:
lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__magic_name__ )
first_sum += 1 / float(__magic_name__ )
index += 1
return 1 / first_sum
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Optional[Any] =0.0_0
lowercase : int =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase : Tuple =f'''Resistor at index {index} has a negative value!'''
raise ValueError(__magic_name__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
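
# A quick worked check of the two formulas above (a standalone restatement,
# since the renamed defs share one name): parallel resistance is 1 / sum(1/R_i),
# series resistance is the plain sum.
def parallel_resistance(resistors):
    return 1 / sum(1 / r for r in resistors)

assert abs(parallel_resistance([2, 4]) - 4 / 3) < 1e-12  # 1 / (1/2 + 1/4) = 4/3 ohms
assert sum([2, 4]) == 6                                   # series: 2 + 4 = 6 ohms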
| 92 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
UpperCamelCase_ = ["""bert-base-uncased""", """bert-base-cased"""]
UpperCamelCase_ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __SCREAMING_SNAKE_CASE ( tf.keras.Model ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
super().__init__()
lowercase : Dict =tokenizer
lowercase : str =AutoConfig.from_pretrained(UpperCAmelCase__ )
lowercase : str =TFAutoModel.from_config(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer(UpperCAmelCase__ )
lowercase : int =self.bert(**UpperCAmelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().setUp()
lowercase : str =[
BertTokenizer.from_pretrained(UpperCAmelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowercase : Tuple =[TFBertTokenizer.from_pretrained(UpperCAmelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(UpperCAmelCase__ , use_fast_bert_tokenizer=UpperCAmelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowercase : Any =[
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowercase : Optional[Any] =list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowercase : Tuple =tokenizer(UpperCAmelCase__ , return_tensors='''tf''' , padding='''longest''' )
lowercase : Optional[int] =tf_tokenizer(UpperCAmelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowercase : int =tf_tokenizer(self.paired_sentences )
lowercase : Dict =tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowercase : Any =tf.function(UpperCAmelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowercase : int =tf.constant(UpperCAmelCase__ )
lowercase : List[str] =compiled_tokenizer(UpperCAmelCase__ )
lowercase : Tuple =tf_tokenizer(UpperCAmelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowercase : str =ModelToSave(tokenizer=UpperCAmelCase__ )
lowercase : List[str] =tf.convert_to_tensor(self.test_sentences )
lowercase : List[Any] =model(UpperCAmelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowercase : List[str] =Path(UpperCAmelCase__ ) / '''saved.model'''
model.save(UpperCAmelCase__ )
lowercase : Dict =tf.keras.models.load_model(UpperCAmelCase__ )
lowercase : Dict =loaded_model(UpperCAmelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
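
# The compilation test above boils down to one invariant: an in-graph tokenizer
# must produce identical output eagerly and under tf.function. A hedged sketch
# of that check (tf_tokenizer built as in setUp above):
import tensorflow as tf

def check_compilable(tf_tokenizer, sentences):
    compiled = tf.function(tf_tokenizer)
    inputs = tf.constant(sentences)
    eager, graph = tf_tokenizer(inputs), compiled(inputs)
    for key in eager:
        tf.debugging.assert_equal(eager[key], graph[key])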
| 92 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
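# Illustrative note (not part of the original script): MAPPING entries containing a "*"
# are templates over the encoder layer index. For example, a fairseq weight that lives in
# layer 3, such as "...layers.3.self_attn.linear_k", resolves to
# "encoder.layers.3.self_attn.linear_k" once the "*" is replaced with the layer number
# (see the replace logic in the loading loop below).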
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str:
for attribute in key.split('''.''' ):
lowercase : Tuple =getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase : List[Any] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase : Any =value
elif weight_type == "weight_g":
lowercase : List[Any] =value
elif weight_type == "weight_v":
lowercase : Union[str, Any] =value
elif weight_type == "bias":
lowercase : Tuple =value
elif weight_type == "running_mean":
lowercase : Union[str, Any] =value
elif weight_type == "running_var":
lowercase : str =value
elif weight_type == "num_batches_tracked":
lowercase : Tuple =value
elif weight_type == "inv_freq":
lowercase : Optional[Any] =value
else:
lowercase : Tuple =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
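# Minimal sketch (hypothetical module, illustration only) of how the getattr loop above
# resolves a dotted key against a model object before assigning the fairseq value:
#
#   import torch.nn as nn
#   demo = nn.Sequential()
#   demo.add_module("proj", nn.Linear(4, 4))
#   pointer = demo
#   for attribute in "proj.weight".split("."):
#       pointer = getattr(pointer, attribute)
#   # pointer now refers to demo.proj.weight, ready to be overwritten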
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
lowercase : Optional[int] =[]
lowercase : Tuple =fairseq_model.state_dict()
lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase : Tuple =False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase : List[Any] =True
else:
for key, mapped_key in MAPPING.items():
lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Union[str, Any] =True
if "*" in mapped_key:
lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2]
lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ )
if "pos_bias_u" in name:
lowercase : Optional[Any] =None
elif "pos_bias_v" in name:
lowercase : Union[str, Any] =None
elif "weight_g" in name:
lowercase : Any ='''weight_g'''
elif "weight_v" in name:
lowercase : Tuple ='''weight_v'''
elif "bias" in name:
lowercase : Optional[int] ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[int] ='''weight'''
elif "running_mean" in name:
lowercase : Union[str, Any] ='''running_mean'''
elif "inv_freq" in name:
lowercase : Any ='''inv_freq'''
elif "running_var" in name:
lowercase : Tuple ='''running_var'''
elif "num_batches_tracked" in name:
lowercase : Dict ='''num_batches_tracked'''
else:
lowercase : str =None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int:
lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1]
lowercase : Any =name.split('''.''' )
lowercase : List[str] =int(items[0] )
lowercase : Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase : Union[str, Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase : Optional[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase : Optional[int] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase : str =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
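# Note (added for clarity, based on the branches above): type_id 0 corresponds to the
# convolution weight/bias of a feature-extractor block, while type_id 2 corresponds to
# its normalization layer (only the first block when group norm is used). Anything else
# is collected as an unused weight.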
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]:
if config_path is not None:
lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' )
else:
lowercase : Optional[int] =WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase : Dict ='''rotary'''
if is_finetuned:
if dict_path:
lowercase : Optional[Any] =Dictionary.load(__magic_name__ )
# Important: swap the bos & pad token ids, since the CTC blank symbol is <pad> and
# not <s> as in fairseq
lowercase : str =target_dict.pad_index
lowercase : Union[str, Any] =target_dict.bos_index
lowercase : Any =target_dict.eos_index
lowercase : Tuple =len(target_dict.symbols )
lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' )
if not os.path.isdir(__magic_name__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowercase : Dict =target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : str =0
lowercase : List[Any] =1
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__magic_name__ , __magic_name__ )
lowercase : List[str] =WavaVecaCTCTokenizer(
__magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , )
lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False
lowercase : str =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
processor.save_pretrained(__magic_name__ )
lowercase : str =WavaVecaConformerForCTC(__magic_name__ )
else:
lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ )
if is_finetuned:
lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' )
lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ )
lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
lowercase : List[Any] =model[0].eval()
recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
hf_wavavec.save_pretrained(__magic_name__ )
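# Example invocation (script name and paths are placeholders, shown for illustration):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./converted_model \
#       --dict_path /path/to/dict.ltr.txt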
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 92 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : str ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
lowercase : List[Any] =sorted(string.lower() )
return len(__magic_name__ ) == len(set(__magic_name__ ) )
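# Examples (illustrative, assuming the function above; comparison is case-insensitive):
#   "Uncopyrightable" -> True  (no letter repeats)
#   "letter"          -> False ("t" and "e" repeat)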
if __name__ == "__main__":
UpperCamelCase_ = input("""Enter a string """).strip()
UpperCamelCase_ = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 92 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowercase : int =float(embedding_dim // 2 )
lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment )
lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 )
# scale embeddings
lowercase : Tuple =scale * emb
if flip_sin_to_cos:
lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 )
else:
lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 )
lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] )
return signal
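# Minimal usage sketch (illustrative; the call-site name below is used):
#   t = jnp.arange(4, dtype=jnp.float32)                  # four timesteps
#   emb = get_sinusoidal_embeddings(t, embedding_dim=32)
#   # emb.shape == (4, 32): sin features in one half, cos features in the other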
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ )
lowercase : Any =nn.silu(UpperCAmelCase__ )
lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = False
lowerCamelCase_ = 1
@nn.compact
def __call__( self : int , UpperCAmelCase__ : str ):
'''simple docstring'''
return get_sinusoidal_embeddings(
UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 92 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Dict=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ):
'''simple docstring'''
lowercase : List[str] =parent
lowercase : Dict =batch_size
lowercase : List[str] =seq_length
lowercase : List[str] =is_training
lowercase : Union[str, Any] =use_input_mask
lowercase : str =use_token_type_ids
lowercase : List[Any] =use_labels
lowercase : List[Any] =vocab_size
lowercase : Tuple =hidden_size
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : str =hidden_act
lowercase : Any =hidden_dropout_prob
lowercase : str =attention_probs_dropout_prob
lowercase : str =max_position_embeddings
lowercase : Dict =type_vocab_size
lowercase : List[str] =type_sequence_label_size
lowercase : List[Any] =initializer_range
lowercase : Optional[Any] =num_labels
lowercase : Any =num_choices
lowercase : Optional[int] =scope
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] =None
if self.use_input_mask:
lowercase : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : str =None
lowercase : int =None
lowercase : Any =None
if self.use_labels:
lowercase : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[str] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[str] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =OpenLlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowercase : Tuple =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , ):
'''simple docstring'''
lowercase : Optional[Any] =True
lowercase : Optional[Any] =OpenLlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowercase : int =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowercase : Optional[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , ):
'''simple docstring'''
lowercase : List[str] =OpenLlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Union[str, Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : int =True
lowercase : List[str] =True
lowercase : Tuple =OpenLlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowercase : Optional[int] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowercase : Union[str, Any] =outputs.past_key_values
# create hypothetical next tokens and extend them to next_input_ids
lowercase : Any =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase : Tuple =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the new mask to the attention mask
lowercase : Any =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase : int =torch.cat([input_mask, next_mask] , dim=-1 )
lowercase : Dict =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
lowercase : Optional[int] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
lowercase : List[Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase : Any =output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase : Tuple =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : str =config_and_inputs
lowercase : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowerCamelCase_ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Any =OpenLlamaModelTester(self )
lowercase : Dict =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Dict =type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase : Dict =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[int] =3
lowercase : List[Any] =input_dict['''input_ids''']
lowercase : Any =input_ids.ne(1 ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase : Optional[Any] =OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase , lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Dict =3
lowercase : List[str] ='''single_label_classification'''
lowercase : Tuple =input_dict['''input_ids''']
lowercase : Dict =input_ids.ne(1 ).to(UpperCAmelCase__ )
lowercase : List[str] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase : Dict =OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase : str =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[Any] =3
lowercase : Optional[int] ='''multi_label_classification'''
lowercase : Dict =input_dict['''input_ids''']
lowercase : Optional[int] =input_ids.ne(1 ).to(UpperCAmelCase__ )
lowercase : Dict =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase : str =OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : List[str] =ids_tensor([1, 10] , config.vocab_size )
lowercase : Union[str, Any] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase : Dict =OpenLlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
lowercase : Optional[Any] =original_model(UpperCAmelCase__ ).last_hidden_state
lowercase : Tuple =original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase : Optional[int] ={'''type''': scaling_type, '''factor''': 10.0}
lowercase : Tuple =OpenLlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
lowercase : Optional[Any] =scaled_model(UpperCAmelCase__ ).last_hidden_state
lowercase : Optional[int] =scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
| 92 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase_ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'esm'
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Any =vocab_size
lowercase : List[Any] =hidden_size
lowercase : Any =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : int =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =layer_norm_eps
lowercase : Union[str, Any] =position_embedding_type
lowercase : List[Any] =use_cache
lowercase : Dict =emb_layer_norm_before
lowercase : Optional[Any] =token_dropout
lowercase : Union[str, Any] =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
lowercase : Any =EsmFoldConfig()
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ )
lowercase : Union[str, Any] =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
lowercase : int =get_default_vocab_list()
else:
lowercase : Tuple =vocab_list
else:
lowercase : Union[str, Any] =None
lowercase : Dict =None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase__ ):
lowercase : Optional[Any] =self.esmfold_config.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase : str =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase__ ):
lowercase : int =TrunkConfig(**self.trunk )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =asdict(self )
lowercase : Union[str, Any] =self.trunk.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 48
lowerCamelCase_ = 10_24
lowerCamelCase_ = 1_28
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = False
lowerCamelCase_ = 4
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.structure_module is None:
lowercase : Any =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase__ ):
lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
# Each state dim must be a round multiple of its corresponding head width.
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase : str =self.sequence_state_dim // self.sequence_head_width
lowercase : int =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should be smaller than 0.4, got {self.dropout}.''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =asdict(self )
lowercase : Any =self.structure_module.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 3_84
lowerCamelCase_ = 1_28
lowerCamelCase_ = 16
lowerCamelCase_ = 1_28
lowerCamelCase_ = 12
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 0.1
lowerCamelCase_ = 8
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 7
lowerCamelCase_ = 10
lowerCamelCase_ = 1E-8
lowerCamelCase_ = 1E5
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return asdict(self )
def _lowerCAmelCase ( ) -> Optional[int]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
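# Usage sketch (illustrative): constructing the config class above with
# is_folding_model=True and no esmfold_config/vocab_list falls back to a default
# EsmFoldConfig and to the default vocabulary returned by the function above.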
| 92 | 1 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 92 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Any:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase : Optional[int] =0
# Doctest custom flag to ignore output.
UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""")
UpperCamelCase_ = doctest.OutputChecker
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_ = CustomOutputChecker
UpperCamelCase_ = HfDoctestModule
UpperCamelCase_ = HfDocTestParser
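# Illustrative note: the IGNORE_RESULT option flag registered above can be used inside a
# doctest as
#   >>> get_some_value()  # doctest: +IGNORE_RESULT
# so that the statement still runs but its output is not compared.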
| 92 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
UpperCamelCase_ = pd.read_csv("""sample_data.csv""", header=None)
UpperCamelCase_ = df.shape[0]  # number of rows in the dataset
# If you're using some other dataset, select its target column here
UpperCamelCase_ = df.iloc[:, 1:2]
UpperCamelCase_ = actual_data.values.reshape(len_data, 1)
UpperCamelCase_ = MinMaxScaler().fit_transform(actual_data)
UpperCamelCase_ = 10
UpperCamelCase_ = 5
UpperCamelCase_ = 20
UpperCamelCase_ = len_data - periods * look_back
UpperCamelCase_ = actual_data[:division]
UpperCamelCase_ = actual_data[division - look_back :]
UpperCamelCase_ , UpperCamelCase_ = [], []
UpperCamelCase_ , UpperCamelCase_ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
UpperCamelCase_ = np.array(train_x)
UpperCamelCase_ = np.array(test_x)
UpperCamelCase_ = np.array([list(i.ravel()) for i in train_y])
UpperCamelCase_ = np.array([list(i.ravel()) for i in test_y])
UpperCamelCase_ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64))  # input_shape is only honored on the first layer, so it is dropped here
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
UpperCamelCase_ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
UpperCamelCase_ = model.predict(x_test)
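# Possible follow-up (illustrative, not part of the script): undo the min-max scaling on
# the predictions before plotting, e.g. with a scaler fitted on the raw target column:
#   scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(-1, 1))
#   predictions_rescaled = scaler.inverse_transform(model.predict(x_test).reshape(-1, 1))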
| 92 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Union[str, Any] =do_rescale
lowercase : List[Any] =rescale_factor
lowercase : Tuple =do_pad
lowercase : List[str] =pad_size
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ )
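# Pad height/width up to the next multiple of `size`. Note that when a dimension is
# already an exact multiple, this formula still adds a full extra block of `size` pixels.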
lowercase : Tuple =(old_height // size + 1) * size - old_height
lowercase : Tuple =(old_width // size + 1) * size - old_width
return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase : int =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : int =do_pad if do_pad is not None else self.do_pad
lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_pad:
lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
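# Usage sketch (illustrative; shapes are hypothetical): preprocess() above accepts a list
# of images, rescales by 1/255, then pads each spatial dimension past the next multiple
# of pad_size (8 by default), e.g. a 20x20 input comes back 24x24 in channels-first layout.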
| 92 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
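# Illustrative note: _LazyModule defers the heavy torch import until an attribute is
# actually accessed, so importing just the config class from this module stays cheap even
# in environments where torch is not installed.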
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'speech_to_text_2'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : List[str] =vocab_size
lowercase : Optional[int] =d_model
lowercase : Optional[Any] =decoder_ffn_dim
lowercase : Any =decoder_layers
lowercase : Dict =decoder_attention_heads
lowercase : List[Any] =dropout
lowercase : List[Any] =attention_dropout
lowercase : Any =activation_dropout
lowercase : Optional[Any] =activation_function
lowercase : Optional[int] =init_std
lowercase : Dict =decoder_layerdrop
lowercase : Optional[int] =use_cache
lowercase : Optional[Any] =decoder_layers
lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase : str =max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
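# Note (illustrative): the attribute_map declared above lets generic code read
# config.num_attention_heads and config.hidden_size, which transparently resolve to
# decoder_attention_heads and d_model on this config.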
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionControlNetImgaImgPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase : Any =ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase : Optional[Any] =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : str =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : Tuple =CLIPTextModel(UpperCAmelCase__ )
lowercase : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : List[Any] ={
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : List[str] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : List[str] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Optional[Any] =2
lowercase : Optional[int] =randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , )
lowercase : int =floats_tensor(control_image.shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Tuple =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
lowercase : Optional[int] ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionControlNetImgaImgPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Optional[int] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(UpperCAmelCase__ : Optional[Any] ):
if isinstance(UpperCAmelCase__ , torch.nn.Convad ):
torch.nn.init.normal_(m.weight )  # in-place initializer; torch.nn.init.normal is deprecated
m.bias.data.fill_(1.0 )
lowercase : Optional[int] =ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ )
torch.manual_seed(0 )
lowercase : Dict =ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ )
torch.manual_seed(0 )
lowercase : Optional[int] =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : Tuple =CLIPTextModel(UpperCAmelCase__ )
lowercase : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : str =MultiControlNetModel([controlneta, controlneta] )
lowercase : Tuple ={
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Optional[int] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Optional[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : List[Any] =2
lowercase : Dict =[
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ),
]
lowercase : List[str] =floats_tensor(control_image[0].shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : str =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Optional[Any] =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
lowercase : Any ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def test_control_guidance_switch(self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
    def test_attention_slicing_forward_pass(self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical(self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception(self ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase ):
    def tearDown(self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self ):
'''simple docstring'''
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 92 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
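    # Added note: pad_to_multiple_of=8 pads each batch to a multiple of 8 tokens,
    # which keeps fp16 matmuls aligned with tensor-core-friendly shapes; with fp32
    # the Trainer simply falls back to its default collator.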
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 92 | 1 |
'''simple docstring'''
def catalan_number(number: int ) -> int:
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
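    # Added illustrative check: with this 1-indexed convention the sequence starts
    # 1, 1, 2, 5, 14, ... so the fifth Catalan number is 14.
    assert catalan_number(5 ) == 14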
| 92 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text(text: str , n: int = 100 , character: str = " " ) -> List[str]:
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
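# Added example: split_text("a b c d", n=2) returns ["a b", "c d"].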
def split_documents(documents: dict ) -> dict:
    titles , texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text'''] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
return {"title": titles, "text": texts}
def embed(documents: dict , ctx_encoder: DPRContextEncoder , ctx_tokenizer: DPRContextEncoderTokenizerFast ) -> dict:
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments" , processing_args: "ProcessingArguments" , index_hnsw_args: "IndexHnswArguments" , ) -> None:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
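    # Added illustration (assumed file contents) of such a tab-separated csv:
    #   Aaron<TAB>Aaron is a prophet, high priest, and the brother of Moses ...
    #   Abraham<TAB>Abraham is the common patriarch of the three Abrahamic religions ...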
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size: int = field(
        default=16 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m: int = field(
        default=128 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float] , low: int , high: int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float] , low: int , mid: int , high: int ) -> tuple[int, int, float]:
    left_sum , max_left = float('''-inf''' ), -1
    right_sum , max_right = float('''-inf''' ), -1
    summ: int | float = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int ) -> float:
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '''\t\t''' , runtime )
    plt.plot(input_sizes , runtimes )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
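    # Added illustrative check (CLRS-style example): the maximum subarray of this
    # array is arr[7:11] = [18, 20, -7, 12], i.e. indices (7, 10) with sum 43.
    example = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12]
    assert max_subarray(example , 0 , len(example ) - 1 ) == (7, 10, 43)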
| 92 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self ):
        '''simple docstring'''
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , '''This is a test''' )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# fmt: off
lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase ):
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    tgt_text = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    @classmethod
    def setUpClass(cls ):
        '''simple docstring'''
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
    def test_get_vocab(self ):
        '''simple docstring'''
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , vocab )
    def test_batch_encode_plus(self ):
        '''simple docstring'''
        self.tokenizer.src_lang = '''en'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes(self ):
        '''simple docstring'''
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_special_tokens_unaffacted_by_save_load(self ):
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_special_tokens )
@require_torch
    def test_batch_fairseq_parity(self ):
        '''simple docstring'''
        self.tokenizer.src_lang = '''en'''
        self.tokenizer.tgt_lang = '''fr'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
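        # Added note: shift_tokens_right builds decoder_input_ids by shifting the
        # labels one position to the right and prepending the given start token,
        # so the decoder predicts each target token from the tokens before it.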
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self ):
        '''simple docstring'''
        self.tokenizer.src_lang = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode(self ):
        '''simple docstring'''
        self.tokenizer.tgt_lang = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation(self ):
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 92 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @cached_property
    def teacher_config(self ):
        '''simple docstring'''
        return AutoConfig.from_pretrained(TINY_BART )
    def test_valid_t5(self ):
        '''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
    def test_asymmetric_t5(self ):
        '''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
    def test_same_decoder_small_encoder(self ):
        '''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def test_small_enc_small_dec(self ):
        '''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
    def test_raises_assert(self ):
        '''simple docstring'''
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 92 |
'''simple docstring'''
def solution(n: int = 600851475143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f'''{solution() = }''')
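    # Added known check: 600851475143 == 71 * 839 * 1471 * 6857, so the largest
    # prime factor returned by solution() is 6857.
    assert solution() == 6857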
| 92 | 1 |
'''simple docstring'''
import datasets
UpperCamelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCamelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCamelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric ):
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self , predictions , references ):
        '''simple docstring'''
        return {"accuracy": simple_accuracy(predictions , references )}
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig ):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
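# Added usage sketch (assumes PretrainedConfig's attribute_map resolution):
#   config = Speech2Text2Config()
#   config.hidden_size          # -> 256, resolved to config.d_model
#   config.num_attention_heads  # -> 4, resolved to config.decoder_attention_heads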
| 92 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int ) -> int:
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size: int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size ) # select large prime number.
    e_1 = primitive_root(p ) # one primitive root on modulo p.
    d = random.randrange(3 , p ) # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str , key_size: int ) -> None:
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('''\nWARNING:''' )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , '''w''' ) as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , '''w''' ) as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main() -> None:
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2048 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 92 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def test_feed_forward_chunking(self ):
'''simple docstring'''
pass
    def test_training(self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 92 | 1 |
'''simple docstring'''
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str ) -> bool:
    start_i = 0
    end_i = len(s ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str ) -> bool:
    end = len(s ) // 2
    n = len(s )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive(s: str ) -> bool:
    if len(s ) <= 2:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
else:
return False
def is_palindrome_slice(s: str ) -> bool:
    return s == s[::-1]
def benchmark_function(name: str ) -> None:
    stmt = f'''all({name}(key) is value for key, value in test_data.items())'''
    setup = f'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 92 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ = object()
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]:
lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ):
lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )]
if matches and all(__magic_name__ ):
return True
return False
def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]:
def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ):
for rule, replacement in rules:
if _match(__magic_name__ , __magic_name__ ):
return replacement
return val
return replace
def _lowerCAmelCase ( ) -> int:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )),
(("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _lowerCAmelCase ( __magic_name__ : str ) -> int:
lowercase : int =_get_partition_rules()
lowercase : Tuple =_replacement_rules(__magic_name__ )
lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )}
lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__magic_name__ ) )
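# Hedged usage sketch: for a flattened parameter key such as
# ("transformer", "h", "0", "mlp", "c_fc", "kernel") (a plausible GPT-style layout,
# not shown in this file), the rules above yield a PartitionSpec sharding the second
# axis over the "mp" mesh axis, and the assert guarantees that no parameter was
# left without a matching rule.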
| 92 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase_ = """true"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any=82 , __magic_name__ : Any=16 ) -> Optional[int]:
set_seed(42 )
lowercase : Any =RegressionModel()
lowercase : Dict =deepcopy(__magic_name__ )
lowercase : List[Any] =RegressionDataset(length=__magic_name__ )
lowercase : Optional[int] =DataLoader(__magic_name__ , batch_size=__magic_name__ )
model.to(accelerator.device )
lowercase , lowercase : List[str] =accelerator.prepare(__magic_name__ , __magic_name__ )
return model, ddp_model, dataloader
def _lowerCAmelCase ( __magic_name__ : Accelerator , __magic_name__ : str=False ) -> Any:
lowercase : Dict =AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowercase : Optional[int] =load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__magic_name__ : List[str] ):
lowercase : Optional[Any] =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
with accelerator.main_process_first():
lowercase : List[str] =dataset.map(
__magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowercase : List[Any] =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__magic_name__ : List[Any] ):
if use_longest:
return tokenizer.pad(__magic_name__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__magic_name__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(__magic_name__ , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=16 )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> List[str]:
lowercase : Optional[Any] =Accelerator(dispatch_batches=__magic_name__ , split_batches=__magic_name__ )
lowercase : int =get_dataloader(__magic_name__ , not dispatch_batches )
lowercase : Any =AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__magic_name__ )
lowercase , lowercase : List[Any] =accelerator.prepare(__magic_name__ , __magic_name__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : int ) -> Union[str, Any]:
lowercase : Any =[]
for batch in dataloader:
lowercase , lowercase : Any =batch.values()
with torch.no_grad():
lowercase : Optional[Any] =model(__magic_name__ )
lowercase , lowercase : Any =accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase , lowercase : int =[], []
for logit, targ in logits_and_targets:
logits.append(__magic_name__ )
targs.append(__magic_name__ )
lowercase , lowercase : Dict =torch.cat(__magic_name__ ), torch.cat(__magic_name__ )
return logits, targs
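# Note: unlike a plain `gather`, `accelerator.gather_for_metrics` also drops the
# samples that distributed samplers duplicate to pad the last batch, so the
# concatenated logits/targets match the true dataset length checked below.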
def _lowerCAmelCase ( __magic_name__ : Accelerator , __magic_name__ : str=82 , __magic_name__ : List[str]=False , __magic_name__ : Optional[Any]=False , __magic_name__ : Optional[int]=16 ) -> Tuple:
lowercase , lowercase , lowercase : int =get_basic_setup(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase , lowercase : Optional[Any] =generate_predictions(__magic_name__ , __magic_name__ , __magic_name__ )
assert (
len(__magic_name__ ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__magic_name__ )}'''
def _lowerCAmelCase ( __magic_name__ : bool = False , __magic_name__ : bool = False ) -> List[Any]:
lowercase : Tuple =evaluate.load('''glue''' , '''mrpc''' )
lowercase , lowercase : List[Any] =get_mrpc_setup(__magic_name__ , __magic_name__ )
# First do baseline
lowercase , lowercase , lowercase : Tuple =setup['''no''']
model.to(__magic_name__ )
model.eval()
for batch in dataloader:
batch.to(__magic_name__ )
with torch.inference_mode():
lowercase : Optional[int] =model(**__magic_name__ )
lowercase : Any =outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__magic_name__ , references=batch['''labels'''] )
lowercase : int =metric.compute()
# Then do distributed
lowercase , lowercase , lowercase : List[str] =setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase : Tuple =model(**__magic_name__ )
lowercase : List[str] =outputs.logits.argmax(dim=-1 )
lowercase : str =batch['''labels''']
lowercase , lowercase : Optional[int] =accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__magic_name__ , references=__magic_name__ )
lowercase : Optional[Any] =metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _lowerCAmelCase ( ) -> List[str]:
lowercase : List[str] =Accelerator(split_batches=__magic_name__ , dispatch_batches=__magic_name__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__magic_name__ , __magic_name__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase : int =Accelerator(split_batches=__magic_name__ , dispatch_batches=__magic_name__ )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__magic_name__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowercase : Optional[Any] =Accelerator()
test_torch_metrics(__magic_name__ , 512 )
accelerator.state._reset_state()
def _lowerCAmelCase ( __magic_name__ : str ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 92 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
lowercase : Optional[Any] =1
lowercase : Union[str, Any] =True
for v in tree[start]:
if v not in visited:
ret += dfs(__magic_name__ )
if ret % 2 == 0:
cuts.append(__magic_name__ )
return ret
def _lowerCAmelCase ( ) -> int:
dfs(1 )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 10, 9
UpperCamelCase_ = defaultdict(list)
UpperCamelCase_ = {}
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
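# A self-contained sketch of the same even-tree idea with explicit parameters
# (hypothetical helper names; the snippet above relies on the module-level
# `tree`, `visited` and `cuts` bindings instead):
#
# def even_subtrees(tree, node, visited, cuts):
#     visited.add(node)
#     size = 1
#     for child in tree[node]:
#         if child not in visited:
#             size += even_subtrees(tree, child, visited, cuts)
#     if size % 2 == 0:
#         cuts.append(node)  # the edge above `node` can be cut
#     return size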
| 92 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
lowerCamelCase_ = 'nat'
lowerCamelCase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : int=[3, 4, 6, 5] , UpperCAmelCase__ : Optional[int]=[2, 4, 8, 16] , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Optional[Any]=3.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Union[str, Any]=1E-5 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Any =patch_size
lowercase : Tuple =num_channels
lowercase : Tuple =embed_dim
lowercase : str =depths
lowercase : Optional[Any] =len(UpperCAmelCase__ )
lowercase : str =num_heads
lowercase : Optional[Any] =kernel_size
lowercase : List[str] =mlp_ratio
lowercase : Dict =qkv_bias
lowercase : List[Any] =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Optional[int] =drop_path_rate
lowercase : List[str] =hidden_act
lowercase : List[str] =layer_norm_eps
lowercase : Optional[int] =initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : Union[str, Any] =int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) )
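# For example, with the defaults above (embed_dim=64 and four stages) this gives
# hidden_size = 64 * 2 ** 3 = 512, the channel dimension after the last stage.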
lowercase : int =layer_scale_init_value
lowercase : Union[str, Any] =['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
lowercase , lowercase : str =get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
| 92 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict:
lowercase : List[str] =R'''\w+[.]\d+'''
lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ )
for pat in pats:
lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) )
return key
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str:
lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase : str =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase : str =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase : str =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowercase : Optional[Any] =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
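# Illustrative shapes (hypothetical, for the two transposes above): a PyTorch
# Conv2d weight of shape (out_ch, in_ch, kh, kw) = (32, 3, 3, 3) becomes Flax's
# (kh, kw, in_ch, out_ch) = (3, 3, 3, 32) "kernel" via transpose(2, 3, 1, 0),
# and a Linear weight of shape (out, in) is transposed to (in, out).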
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]:
# Step 1: Convert the PyTorch tensors to numpy
lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) )
lowercase : Dict =flatten_dict(__magic_name__ )
lowercase : Dict ={}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase : Dict =rename_key(__magic_name__ )
lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase : Tuple =jnp.asarray(__magic_name__ )
return unflatten_dict(__magic_name__ )
| 92 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int ) -> str:
lowercase : Tuple =int(__magic_name__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(__magic_name__ )
lowercase , lowercase : Optional[Any] =divmod(__magic_name__ , 2 )
return binary_recursive(__magic_name__ ) + str(__magic_name__ )
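# Example trace (comment only): for 10, the recursion computes divmod(10, 2) = (5, 0),
# divmod(5, 2) = (2, 1) and divmod(2, 2) = (1, 0); the base case returns "1", so the
# concatenation yields "1" + "0" + "1" + "0" = "1010".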
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : List[Any] =str(__magic_name__ ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase : str ='''-''' if number.startswith('''-''' ) else ''''''
lowercase : List[str] =number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f'''{negative}0b{binary_recursive(int(__magic_name__ ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation, and this value must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]:
lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
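# Example (comment only): with a main target of "hello", evaluating the candidate
# "herlo" counts the four matching positions (h, e, l, o) and returns ("herlo", 4.0).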
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]:
lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 )
lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:]
lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str:
lowercase : Union[str, Any] =list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase : Dict =random.choice(__magic_name__ )
return "".join(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]:
lowercase : Any =[]
# Generate more children proportionally to the fitness score.
lowercase : Dict =int(parent_a[1] * 100 ) + 1
lowercase : List[str] =10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0]
lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]:
# Verify that N_POPULATION is bigger than N_SELECTED.
if N_POPULATION < N_SELECTED:
lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside the genes variable.
lowercase : Optional[int] =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__magic_name__ )
# Generate random starting population.
lowercase : int =[]
for _ in range(__magic_name__ ):
population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
# Just some logs to know what the algorithm is doing.
lowercase , lowercase : Optional[int] =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of the evolution.
lowercase : Any =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
lowercase : Dict =[
(item, score / len(__magic_name__ )) for item, score in population_score
]
# This is selection
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also solve small strings in
# far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase_ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCamelCase_ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 92 | 1 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Optional[Any]=1024 , __magic_name__ : Dict=1024 , __magic_name__ : Dict=False , **__magic_name__ : List[Any] ) -> str:
lowercase : int =AutoTokenizer.from_pretrained(__magic_name__ )
lowercase : int =SeqaSeqDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , type_path='''train''' , **__magic_name__ )
lowercase : List[Any] =tok.pad_token_id
def get_lens(__magic_name__ : List[Any] ):
lowercase : str =tqdm(
DataLoader(__magic_name__ , batch_size=512 , num_workers=8 , shuffle=__magic_name__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowercase : Optional[int] =[]
for batch in dl:
lowercase : Optional[Any] =batch['''input_ids'''].ne(__magic_name__ ).sum(1 ).tolist()
lowercase : Optional[int] =batch['''labels'''].ne(__magic_name__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__magic_name__ , __magic_name__ ):
max_lens.append(max(__magic_name__ , __magic_name__ ) )
else:
max_lens.extend(__magic_name__ )
return max_lens
lowercase : int =get_lens(__magic_name__ )
lowercase : List[str] =SeqaSeqDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , type_path='''val''' , **__magic_name__ )
lowercase : Dict =get_lens(__magic_name__ )
pickle_save(__magic_name__ , train_ds.len_file )
pickle_save(__magic_name__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
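# The core length trick, sketched on a toy batch (assuming pad_token_id == 0):
# batch = torch.tensor([[5, 6, 7, 0, 0], [8, 9, 0, 0, 0]])
# batch.ne(0).sum(1).tolist() # -> [3, 2], the unpadded length of each row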
| 92 |
'''simple docstring'''
import datasets
UpperCamelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCamelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCamelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
return (preds == labels).mean()
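# Example (comment only): with preds = np.array([0, 1, 1]) and labels = np.array([0, 1, 0]),
# (preds == labels).mean() is 2/3, i.e. an accuracy of roughly 0.667.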
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
| 92 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =parent
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return {}
def _lowerCAmelCase ( ) -> List[str]:
lowercase : int ='''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
lowercase : Dict ='''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
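# When bs4 is available, the feature extractor parses these strings into text nodes
# together with the xpath of each node; the expected values hard-coded in the tests
# below spell out that mapping for both documents.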
@require_bsa
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = MarkupLMFeatureExtractor if is_bsa_available() else None
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Tuple =MarkupLMFeatureExtractionTester(self )
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# Initialize feature_extractor
lowercase : Dict =self.feature_extraction_class()
# Test not batched input
lowercase : List[str] =get_html_strings()[0]
lowercase : str =feature_extractor(UpperCAmelCase__ )
# fmt: off
lowercase : Dict =[['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
lowercase : Any =[['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , UpperCAmelCase__ )
self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
# Test batched
lowercase : int =get_html_strings()
lowercase : Union[str, Any] =feature_extractor(UpperCAmelCase__ )
# fmt: off
lowercase : Dict =expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
lowercase : List[str] =expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCAmelCase__ )
self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : Optional[int] =13
lowercase : Union[str, Any] =7
lowercase : str =30
lowercase : Optional[int] =self.seq_length + self.mem_len
lowercase : Dict =15
lowercase : List[str] =True
lowercase : Optional[int] =True
lowercase : Tuple =99
lowercase : str =[10, 50, 80]
lowercase : List[Any] =32
lowercase : Optional[int] =32
lowercase : int =4
lowercase : Any =8
lowercase : List[Any] =128
lowercase : List[str] =2
lowercase : Tuple =2
lowercase : int =None
lowercase : Optional[int] =1
lowercase : int =0
lowercase : List[str] =3
lowercase : str =self.vocab_size - 1
lowercase : Tuple =0.01
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_labels:
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ )
lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple()
lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
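# Note: `mems` come back as one (mem_len, batch_size, hidden_size) tensor per layer;
# feeding them back through the `mems` input, as done above, is how Transformer-XL
# extends its effective context beyond a single segment.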
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ )
lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple()
lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple()
lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple()
lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs
lowercase : Union[str, Any] ={'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase_ = () if is_tf_available() else ()
lowerCamelCase_ = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =TFTransfoXLModelTester(self )
lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : int =[TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase : str =model_class(UpperCAmelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase : Union[str, Any] =model.get_output_embeddings()
assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer )
lowercase : Any =model.get_bias()
assert name is None
else:
lowercase : Optional[int] =model.get_output_embeddings()
assert x is None
lowercase : Optional[int] =model.get_bias()
assert name is None
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
| 92 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase_ = 16
UpperCamelCase_ = 32
def _lowerCAmelCase ( __magic_name__ : Accelerator , __magic_name__ : DatasetDict , __magic_name__ : List[int] , __magic_name__ : List[int] , __magic_name__ : int = 16 ) -> Any:
lowercase : int =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase : Optional[Any] =DatasetDict(
{
'''train''': dataset['''train'''].select(__magic_name__ ),
'''validation''': dataset['''train'''].select(__magic_name__ ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(__magic_name__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
lowercase : int =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase : Any =datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase : Union[str, Any] =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__magic_name__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase : str =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase : Optional[Any] =16
elif accelerator.mixed_precision != "no":
lowercase : Union[str, Any] =8
else:
lowercase : str =None
return tokenizer.pad(
__magic_name__ , padding='''longest''' , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase : str =DataLoader(
tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase : Tuple =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase : int =DataLoader(
tokenized_datasets['''test'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader, test_dataloader
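# Hedged sketch of the fold indices consumed above (StratifiedKFold is the standard
# sklearn API used in training_function below):
#
# from sklearn.model_selection import StratifiedKFold
# import numpy as np
# labels = np.array([0, 0, 0, 1, 1, 1])
# for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels):
#     pass # each split keeps the 0/1 label ratio roughly balanced in both subsets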
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Optional[Any]:
# New Code #
lowercase : Any =[]
# Download the dataset
lowercase : List[Any] =load_dataset('''glue''' , '''mrpc''' )
# Create our splits
lowercase : List[str] =StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowercase : Optional[int] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Optional[int] =config['''lr''']
lowercase : Any =int(config['''num_epochs'''] )
lowercase : List[Any] =int(config['''seed'''] )
lowercase : Optional[Any] =int(config['''batch_size'''] )
lowercase : int =evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase : str =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase : Any =batch_size // MAX_GPU_BATCH_SIZE
lowercase : List[str] =MAX_GPU_BATCH_SIZE
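# For example, a requested batch_size of 64 with MAX_GPU_BATCH_SIZE = 16 yields
# gradient_accumulation_steps = 4 and a per-step batch size of 16.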
set_seed(__magic_name__ )
# New Code #
# Create our folds:
lowercase : Any =kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
lowercase : int =[]
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__magic_name__ ):
lowercase , lowercase , lowercase : List[str] =get_fold_dataloaders(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : Optional[Any] =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : Dict =model.to(accelerator.device )
# Instantiate optimizer
lowercase : List[str] =AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase : Dict =get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] =accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase : List[Any] =model(**__magic_name__ )
lowercase : List[Any] =outputs.loss
lowercase : Dict =loss / gradient_accumulation_steps
accelerator.backward(__magic_name__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : List[Any] =model(**__magic_name__ )
lowercase : Any =outputs.logits.argmax(dim=-1 )
lowercase , lowercase : Dict =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase : str =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
# New Code #
# We also run predictions on the test set at the very end
lowercase : int =[]
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : int =model(**__magic_name__ )
lowercase : int =outputs.logits
lowercase , lowercase : Dict =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__magic_name__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowercase : List[Any] =torch.cat(__magic_name__ , dim=0 )
lowercase : Tuple =torch.stack(__magic_name__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
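# The per-fold test logits are stacked, summed and divided by the fold count (a
# simple logit-averaging ensemble) before argmax produces the final predictions.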
lowercase : Optional[Any] =metric.compute(predictions=__magic_name__ , references=__magic_name__ )
accelerator.print('''Average test metrics from all folds:''' , __magic_name__ )
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__magic_name__ , default=__magic_name__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=__magic_name__ , default=3 , help='''The number of splits to perform across the dataset''' )
lowercase : List[Any] =parser.parse_args()
lowercase : Any ={'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 92 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ):
'''simple docstring'''
lowercase : str =parent
lowercase : int =batch_size
lowercase : Any =seq_length
lowercase : int =is_training
lowercase : str =use_input_mask
lowercase : int =use_token_type_ids
lowercase : Dict =use_labels
lowercase : int =vocab_size
lowercase : str =embedding_size
lowercase : Union[str, Any] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_hidden_groups
lowercase : Union[str, Any] =num_attention_heads
lowercase : Any =intermediate_size
lowercase : Tuple =hidden_act
lowercase : Optional[int] =hidden_dropout_prob
lowercase : Union[str, Any] =attention_probs_dropout_prob
lowercase : List[Any] =max_position_embeddings
lowercase : int =type_vocab_size
lowercase : int =type_sequence_label_size
lowercase : Any =initializer_range
lowercase : List[Any] =num_labels
lowercase : int =num_choices
lowercase : Optional[int] =scope
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[int] =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Dict =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Tuple =None
lowercase : Any =None
lowercase : Dict =None
if self.use_labels:
lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Any =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
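# Note: embedding_size (16) is deliberately smaller than hidden_size (36) here,
# mirroring ALBERT's factorized embedding parameterization, in which the vocabulary
# embedding is projected up to the hidden dimension.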
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : int =AlbertModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Optional[int] =self.num_choices
lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Union[str, Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Dict =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ):
'''simple docstring'''
lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowercase : Any =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ )
lowercase : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =AlbertModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Tuple =type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' )
lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
lowercase : int =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowercase : Union[str, Any] =torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
| 92 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase : int =get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase__ ) , torch_builtin(UpperCAmelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase__ ) , gelu_new(UpperCAmelCase__ ) ) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase : List[str] =get_activation('''gelu''' )
lowercase : Optional[int] =get_activation('''gelu_10''' )
lowercase : Union[str, Any] =torch_builtin(UpperCAmelCase__ )
lowercase : int =geluaa(UpperCAmelCase__ )
lowercase : Dict =torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCAmelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(UpperCAmelCase__ ):
get_activation('''bogus''' )
with self.assertRaises(UpperCAmelCase__ ):
get_activation(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =get_activation('''gelu''' )
lowercase : Optional[int] =1
lowercase : List[Any] =get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCAmelCase__ ):
lowercase : Tuple =acta.a
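For orientation, a minimal self-contained sketch of the clipped activation the test above exercises; it relies only on the public get_activation API already used in the imports, and the printed value is what the clipping guarantees:
import torch
from transformers.activations import get_activation

x = torch.linspace(-5.0, 20.0, steps=6)
act = get_activation("gelu_10")  # GELU whose output is clipped to at most 10
print(act(x).max())  # tensor(10.), since gelu(20) would otherwise be ~20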
| 92 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
'''simple docstring'''
if dst_width <= 0 or dst_height <= 0:
raise ValueError('''Destination width/height should be > 0''' )
lowercase : Union[str, Any] =img
lowercase : Union[str, Any] =img.shape[1]
lowercase : str =img.shape[0]
lowercase : Union[str, Any] =dst_width
lowercase : str =dst_height
lowercase : str =self.src_w / self.dst_w
lowercase : Optional[Any] =self.src_h / self.dst_h
lowercase : int =(
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )]
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
return int(self.ratio_x * x )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
return int(self.ratio_y * y )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 800, 600
UpperCamelCase_ = imread("""image_data/lena.jpg""", 1)
UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
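A tiny worked example of the index mapping the class above implements: shrinking a 4x4 image to 2x2 gives ratio 4/2 = 2, so int(ratio * i) simply strides over every other source pixel. This sketch is illustrative, not part of the original snippet:
import numpy as np

src = np.arange(16, dtype=np.uint8).reshape(4, 4)
ratio = src.shape[0] / 2  # same ratio computation as ratio_x / ratio_y above
dst = np.array([[src[int(ratio * i), int(ratio * j)] for j in range(2)] for i in range(2)])
print(dst)  # [[ 0  2]
            #  [ 8 10]]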
| 92 | 1 |
'''simple docstring'''
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def _lowerCAmelCase ( __magic_name__ : int = 20 ) -> str:
lowercase : Any =math.comb(NUM_BALLS , __magic_name__ )
lowercase : Any =math.comb(NUM_BALLS - BALLS_PER_COLOUR , __magic_name__ )
lowercase : Optional[Any] =NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
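Each colour is missing from a draw of n balls with probability C(60, n) / C(70, n), so by linearity of expectation the answer is 7 * (1 - C(60, n) / C(70, n)), which is exactly what the function above computes. A hedged Monte Carlo sketch to sanity-check it (the function name and trial count are illustrative assumptions):
import random

NUM_COLOURS = 7
BALLS_PER_COLOUR = 10

def estimate_distinct_colours(num_picked: int = 20, trials: int = 200_000) -> float:
    # Label each of the 70 balls with its colour, then sample without replacement.
    urn = [ball % NUM_COLOURS for ball in range(NUM_COLOURS * BALLS_PER_COLOUR)]
    return sum(len(set(random.sample(urn, num_picked))) for _ in range(trials)) / trials

# estimate_distinct_colours() should hover around the analytic value printed by solution(20).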
| 92 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Any =0.0_0
lowercase : Tuple =0
for resistor in resistors:
if resistor <= 0:
lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__magic_name__ )
first_sum += 1 / float(__magic_name__ )
index += 1
return 1 / first_sum
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Optional[Any] =0.0_0
lowercase : int =0
for resistor in resistors:
if resistor < 0:
lowercase : Tuple =f'''Resistor at index {index} has a negative value!'''
raise ValueError(__magic_name__ )
sum_r += resistor
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
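A short usage sketch of the two helpers above; because their names are obfuscated in this dump, the sketch restates them under the hypothetical names resistor_parallel and resistor_series:
def resistor_parallel(resistors: list[float]) -> float:  # hypothetical name
    return 1 / sum(1 / r for r in resistors)

def resistor_series(resistors: list[float]) -> float:  # hypothetical name
    return sum(resistors)

# 4 ohm and 12 ohm: 1 / (1/4 + 1/12) = 3.0 in parallel; 4 + 12 = 16.0 in series.
print(resistor_parallel([4.0, 12.0]))  # 3.0
print(resistor_series([4.0, 12.0]))    # 16.0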
| 92 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
if not sentence:
return ""
lowercase : Tuple =dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
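The snippet runs doctest.testmod() but defines no doctests, so here is a de-obfuscated, runnable restatement showing the intended behaviour; the name capitalize is an assumption:
from string import ascii_lowercase, ascii_uppercase

def capitalize(sentence: str) -> str:  # hypothetical name for the helper above
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]

assert capitalize("hello world") == "Hello world"
assert capitalize("123 hey") == "123 hey"  # non-alphabetic first character passes through
assert capitalize("") == ""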
| 92 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str:
for attribute in key.split('''.''' ):
lowercase : Tuple =getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase : List[Any] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase : Any =value
elif weight_type == "weight_g":
lowercase : List[Any] =value
elif weight_type == "weight_v":
lowercase : Union[str, Any] =value
elif weight_type == "bias":
lowercase : Tuple =value
elif weight_type == "running_mean":
lowercase : Union[str, Any] =value
elif weight_type == "running_var":
lowercase : str =value
elif weight_type == "num_batches_tracked":
lowercase : Tuple =value
elif weight_type == "inv_freq":
lowercase : Optional[Any] =value
else:
lowercase : Tuple =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
lowercase : Optional[int] =[]
lowercase : Tuple =fairseq_model.state_dict()
lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase : Tuple =False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase : List[Any] =True
else:
for key, mapped_key in MAPPING.items():
lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Union[str, Any] =True
if "*" in mapped_key:
lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2]
lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ )
if "pos_bias_u" in name:
lowercase : Optional[Any] =None
elif "pos_bias_v" in name:
lowercase : Union[str, Any] =None
elif "weight_g" in name:
lowercase : Any ='''weight_g'''
elif "weight_v" in name:
lowercase : Tuple ='''weight_v'''
elif "bias" in name:
lowercase : Optional[int] ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[int] ='''weight'''
elif "running_mean" in name:
lowercase : Union[str, Any] ='''running_mean'''
elif "inv_freq" in name:
lowercase : Any ='''inv_freq'''
elif "running_var" in name:
lowercase : Tuple ='''running_var'''
elif "num_batches_tracked" in name:
lowercase : Dict ='''num_batches_tracked'''
else:
lowercase : str =None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int:
lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1]
lowercase : Any =name.split('''.''' )
lowercase : List[str] =int(items[0] )
lowercase : Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase : Union[str, Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase : Optional[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase : Optional[int] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase : str =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]:
if config_path is not None:
lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' )
else:
lowercase : Optional[int] =WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase : Dict ='''rotary'''
if is_finetuned:
if dict_path:
lowercase : Optional[Any] =Dictionary.load(__magic_name__ )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad> and
# not <s> as in fairseq
lowercase : str =target_dict.pad_index
lowercase : Union[str, Any] =target_dict.bos_index
lowercase : Any =target_dict.eos_index
lowercase : Tuple =len(target_dict.symbols )
lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' )
if not os.path.isdir(__magic_name__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowercase : Dict =target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : str =0
lowercase : List[Any] =1
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__magic_name__ , __magic_name__ )
lowercase : List[str] =WavaVecaCTCTokenizer(
__magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , )
lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False
lowercase : str =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
processor.save_pretrained(__magic_name__ )
lowercase : str =WavaVecaConformerForCTC(__magic_name__ )
else:
lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ )
if is_finetuned:
lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' )
lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ )
lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
lowercase : List[Any] =model[0].eval()
recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
hf_wavavec.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
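Usage note: the conversion script above is driven entirely by the argparse flags it defines; a typical invocation passes --checkpoint_path pointing at the fairseq checkpoint and --pytorch_dump_folder_path for the converted model, with --config_path, --dict_path, and --not_finetuned as optional extras (exact paths depend on your setup).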
| 92 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Tuple=18 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , ):
'''simple docstring'''
lowercase : Union[str, Any] =size if size is not None else {'''height''': 18, '''width''': 18}
lowercase : Union[str, Any] =parent
lowercase : Optional[int] =batch_size
lowercase : Optional[int] =num_channels
lowercase : List[str] =image_size
lowercase : Dict =min_resolution
lowercase : str =max_resolution
lowercase : int =do_resize
lowercase : int =size
lowercase : Union[str, Any] =apply_ocr
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Tuple =LayoutLMvaImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
lowercase : Tuple =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# Initialize image_processing
lowercase : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
lowercase : Optional[int] =image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
lowercase : Union[str, Any] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase : Dict =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowercase : Tuple =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : Optional[int] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# Initialize image_processing
lowercase : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase : Optional[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : Optional[Any] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# with apply_OCR = True
lowercase : Tuple =LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase : List[Any] =load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
lowercase : Dict =Image.open(ds[0]['''file'''] ).convert('''RGB''' )
lowercase : Tuple =image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase : Tuple =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
lowercase : Optional[Any] =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
lowercase : Union[str, Any] =LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
lowercase : str =image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 92 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowercase : int =float(embedding_dim // 2 )
lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment )
lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 )
# scale embeddings
lowercase : Tuple =scale * emb
if flip_sin_to_cos:
lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 )
else:
lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 )
lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ )
lowercase : Any =nn.silu(UpperCAmelCase__ )
lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = False
lowerCamelCase_ = 1
@nn.compact
def __call__( self : int , UpperCAmelCase__ : str ):
'''simple docstring'''
return get_sinusoidal_embeddings(
UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
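A minimal shape-check sketch for the sinusoidal helper defined at the top of this snippet; it assumes the helper keeps its upstream name get_sinusoidal_embeddings and the positional signature (timesteps, embedding_dim, ...) shown above:
import jax.numpy as jnp

timesteps = jnp.arange(4)  # (4,)
emb = get_sinusoidal_embeddings(timesteps, 32)  # assumed name for the helper above
print(emb.shape)  # (4, 32): sin half then cos half, since flip_sin_to_cos defaults to False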
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
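Design note: the _LazyModule pattern above keeps top-level imports cheap by deferring the heavy torch-backed submodule imports until an attribute is first accessed, while the TYPE_CHECKING branch re-exports the real symbols so static analyzers and IDEs still see them.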
| 92 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase_ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'esm'
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Any =vocab_size
lowercase : List[Any] =hidden_size
lowercase : Any =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : int =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =layer_norm_eps
lowercase : Union[str, Any] =position_embedding_type
lowercase : List[Any] =use_cache
lowercase : Dict =emb_layer_norm_before
lowercase : Optional[Any] =token_dropout
lowercase : Union[str, Any] =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
lowercase : Any =EsmFoldConfig()
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ )
lowercase : Union[str, Any] =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
lowercase : int =get_default_vocab_list()
else:
lowercase : Tuple =vocab_list
else:
lowercase : Union[str, Any] =None
lowercase : Dict =None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase__ ):
lowercase : Optional[Any] =self.esmfold_config.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase : str =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase__ ):
lowercase : int =TrunkConfig(**self.trunk )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =asdict(self )
lowercase : Union[str, Any] =self.trunk.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 48
lowerCamelCase_ = 10_24
lowerCamelCase_ = 1_28
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = False
lowerCamelCase_ = 4
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.structure_module is None:
lowercase : Any =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase__ ):
lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase : str =self.sequence_state_dim // self.sequence_head_width
lowercase : int =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should be below 0.4, got {self.dropout}.''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =asdict(self )
lowercase : Any =self.structure_module.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 3_84
lowerCamelCase_ = 1_28
lowerCamelCase_ = 16
lowerCamelCase_ = 1_28
lowerCamelCase_ = 12
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 0.1
lowerCamelCase_ = 8
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 7
lowerCamelCase_ = 10
lowerCamelCase_ = 1E-8
lowerCamelCase_ = 1E5
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return asdict(self )
def _lowerCAmelCase ( ) -> Optional[int]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
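A brief sketch of instantiating the config above; the class name EsmConfig is assumed from the upstream model type 'esm', and the hyperparameters are illustrative:
config = EsmConfig(
    vocab_size=33,
    hidden_size=320,
    num_hidden_layers=6,
    num_attention_heads=20,
    intermediate_size=1280,
)
print(config.position_embedding_type)  # "absolute" by default
print(config.is_folding_model)         # False, so no EsmFoldConfig is attached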
| 92 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
UpperCamelCase_ = trt.Logger(trt.Logger.WARNING)
UpperCamelCase_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""The number of processes to use for preprocessing."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
UpperCamelCase_ = parser.parse_args()
if args.tokenizer_name:
UpperCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
UpperCamelCase_ = args.per_device_eval_batch_size
UpperCamelCase_ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
UpperCamelCase_ = True
UpperCamelCase_ = """temp_engine/bert-fp32.engine"""
if args.fpaa:
UpperCamelCase_ = """temp_engine/bert-fp16.engine"""
if args.inta:
UpperCamelCase_ = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
UpperCamelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
UpperCamelCase_ = [network.get_input(i) for i in range(network.num_inputs)]
UpperCamelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
UpperCamelCase_ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
UpperCamelCase_ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
UpperCamelCase_ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : int ) -> int:
lowercase : Optional[Any] =np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
lowercase : Optional[int] =np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
lowercase : Any =np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __magic_name__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __magic_name__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __magic_name__ )
# start time
lowercase : Any =time.time()
# Run inference
context.execute_async(
bindings=[int(__magic_name__ ) for d_inp in d_inputs] + [int(__magic_name__ ), int(__magic_name__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__magic_name__ , __magic_name__ , __magic_name__ )
cuda.memcpy_dtoh_async(__magic_name__ , __magic_name__ , __magic_name__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase : Dict =time.time()
lowercase : Optional[int] =end_time - start_time
lowercase : Dict =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
UpperCamelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCamelCase_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
UpperCamelCase_ = raw_datasets["""validation"""].column_names
UpperCamelCase_ = """question""" if """question""" in column_names else column_names[0]
UpperCamelCase_ = """context""" if """context""" in column_names else column_names[1]
UpperCamelCase_ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
UpperCamelCase_ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
UpperCamelCase_ = min(args.max_seq_length, tokenizer.model_max_length)
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> str:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace.
lowercase : Union[str, Any] =[q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
lowercase : int =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=__magic_name__ , stride=args.doc_stride , return_overflowing_tokens=__magic_name__ , return_offsets_mapping=__magic_name__ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase : Union[str, Any] =tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase : Tuple =[]
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase : Dict =tokenized_examples.sequence_ids(__magic_name__ )
lowercase : Optional[int] =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowercase : Union[str, Any] =sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowercase : Optional[int] =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # Set up for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate page-locked host buffers for the two outputs (start logits and end logits).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels so they can be gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inferences = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
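The `model_infer` helper used in the loop above is defined earlier in the script. As a rough, hypothetical sketch of its shape, assuming `pycuda` and TensorRT's async execution API (binding order, buffer names, and dtypes here are illustrative, not taken from the original):

import time

import numpy as np
import pycuda.driver as cuda  # assumes pycuda.autoinit (or explicit context setup) elsewhere


def model_infer_sketch(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(batch["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(batch["attention_mask"], dtype=np.int32)
    # Copy inputs host -> device asynchronously on the stream.
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    start_time = time.time()
    # Run inference; bindings are the raw device pointers in engine-binding order.
    context.execute_async_v2(
        bindings=[int(d) for d in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Copy outputs device -> host, then wait for everything queued on the stream.
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    stream.synchronize()
    infer_time = time.time() - start_time
    return (h_output0, h_output1), infer_time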
| 92 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
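For reference, a doctest that exercises the custom flag registered above could look like this (hypothetical example). With the custom checker installed, the mismatched expected output is simply ignored:

def add(a, b):
    """
    >>> add(1, 1)  # doctest: +IGNORE_RESULT
    3
    """
    return a + b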
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
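The `_LazyModule` machinery itself lives in `transformers.utils`; the core idea can be sketched with a plain module-level `__getattr__` (PEP 562). This is a simplified illustration, not the real implementation:

import importlib

_import_structure = {"configuration_mbart": ["MBartConfig"]}


def __getattr__(name):
    # Resolve the submodule that exports `name` and import it only on first access.
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")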
| 92 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
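As a quick illustration of the padding rule in `pad` above (a standalone sketch, not part of the original class): each side is grown to the next multiple of `size`, and an exact multiple still gains one full extra block because of the `+ 1`:

import numpy as np

size = 8
for old_height in (13, 16):
    pad_height = (old_height // size + 1) * size - old_height
    print(old_height, "->", old_height + pad_height)  # 13 -> 16, 16 -> 24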
| 92 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
UpperCamelCase_ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
UpperCamelCase_ = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
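To make the two sequence helpers above concrete, here is a small hypothetical walk-through of the `[CLS] A [SEP] B [SEP]` layout they implement (the token ids are made up):

cls_id, sep_id = 2, 3
token_ids_0 = [10, 11]
token_ids_1 = [20]
# build_inputs_with_special_tokens
assert [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id] == [2, 10, 11, 3, 20, 3]
# create_token_type_ids_from_sequences: 0s cover the first segment, 1s the second
assert [0] * 4 + [1] * 2 == [0, 0, 0, 0, 1, 1]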
| 92 |
| 92 | 1 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, picking the implementation from the configured backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # A progress bar is not yet supported for _map_with_joblib, because tqdm cannot accurately be applied to joblib,
    # and supporting it would require monkey-patching joblib internal classes, which are subject to change.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Configure the parallel backend (e.g. "spark") for the duration of the context."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
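The contiguous-split arithmetic in `_map_with_multiprocessing_pool` can be checked in isolation; a small standalone sketch (not part of the original module):

def contiguous_splits(n_items, num_proc):
    splits = []
    div, mod = divmod(n_items, num_proc)
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append((start, end))
    return splits


# 10 items over 3 processes: the first `mod` shards get one extra item, and the shards tile the range.
assert contiguous_splits(10, 3) == [(0, 4), (4, 7), (7, 10)]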
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
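ByT5 has no learned vocabulary: the tokenizer works on raw UTF-8 bytes, with byte values offset by the three special tokens (pad=0, eos=1, unk=2). A rough sketch of the documented scheme:

text = "hi"
ids = [b + 3 for b in text.encode("utf-8")]  # shift past pad/eos/unk
print(ids)  # [107, 108]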
| 92 | 1 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    # Returns the largest prime factor of n (Project Euler problem 3).
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
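A quick sanity check of `solution` on the small example from the problem statement:

assert solution(13195) == 29  # prime factors of 13195 are 5, 7, 13 and 29
assert solution(2) == 2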
| 92 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} )
lowerCamelCase_ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _lowerCAmelCase ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(node: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError("Each node should be of type TreeNode and its data should be a float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
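A small usage sketch for the checker above:

valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
assert is_binary_search_tree(valid)
assert not is_binary_search_tree(invalid)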
| 92 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    # Split the text into chunks of n words.
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    # Split each document into passages of roughly 100 words.
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    # Compute the DPR embeddings of document passages.
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowerCamelCase_ = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowerCamelCase_ = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowerCamelCase_ = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowerCamelCase_ = field(
default=1_28 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
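Once the index is saved, retrieval can be smoke-tested directly through the `datasets` FAISS integration. A sketch assuming the `passages_path` and `index_path` produced by `main`, and a `question_embedding` (a 1-D float32 numpy array) computed with the matching DPR question encoder:

from datasets import load_from_disk

dataset = load_from_disk(passages_path)
dataset.load_faiss_index("embeddings", index_path)
scores, retrieved = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
print(retrieved["title"])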
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Sample the next node proportionally to the outgoing transition probabilities.
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
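A small usage sketch of the random-walk counter above (the outgoing probabilities of each node should sum to 1):

transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visited = get_transitions("a", transitions, 1000)
print(visited.most_common())  # 'a' should dominate for these probabilities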
| 92 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : List[Any] =Path(self.tmpdirname )
save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self, **kwargs) -> M2M100Tokenizer:
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple ='''</s>'''
lowercase : Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.get_tokenizer()
lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_tokenizer()
lowercase : str =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , )
lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , '''This is a test''' )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# fmt: off
lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] ='''en'''
lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] ='''en'''
lowercase : int ='''fr'''
lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
lowercase : str =shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase : int =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase : Union[str, Any] ='''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int ='''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase : Optional[Any] ='''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
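# --- Added illustrative sketch (not part of the original test file) ---
# What the `shift_tokens_right` call above is expected to do for M2M100-style
# models: feed the decoder `[decoder_start, tok_0, tok_1, ...]` by shifting the
# labels one position to the right. `shift_tokens_right_sketch` is a
# hypothetical stand-in written for illustration, not the library helper.
import torch
def shift_tokens_right_sketch(labels: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()  # shift everything right by one
    shifted[:, 0] = decoder_start_token_id  # decoder always starts with its start token
    shifted.masked_fill_(shifted == -100, pad_token_id)  # replace ignore-index with pad
    return shifted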
| 92 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset bunch into (features, target).
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
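# --- Added illustrative sketch (not part of the original script) ---
# A minimal smoke test of the helpers above on synthetic data, so the
# train/predict path can be exercised without downloading the dataset.
def _synthetic_demo() -> None:
    rng = np.random.default_rng(seed=0)
    features = rng.normal(size=(200, 8))
    target = features @ rng.normal(size=8) + rng.normal(scale=0.1, size=200)
    x_train, x_test, y_train, y_test = train_test_split(features, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    print(f"Synthetic MAE: {mean_absolute_error(y_test, predictions)}")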
| 92 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
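# --- Added illustrative cross-check (not part of the original solution) ---
# A slow but obviously-correct reference implementation, useful for
# sanity-checking `solution` on small inputs (13195 = 5 * 7 * 13 * 29).
def largest_prime_factor_naive(n: int) -> int:
    i = 2
    while i * i <= n:
        while n % i == 0:
            n //= i
            if n == 1:
                return i
        i += 1
    return n
assert solution(13195) == largest_prime_factor_naive(13195) == 29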
| 92 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand() -> tuple[str, str, str]:
    play, oppo = randrange(len(SORTED_HANDS )), randrange(len(SORTED_HANDS ))
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush(hand: str , expected: bool ) -> None:
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight(hand: str , expected: bool ) -> None:
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand: str , expected: bool , card_values: list ) -> None:
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind(hand: str , expected: int ) -> None:
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values(hand: str , expected: int ) -> None:
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_compare_simple(hand: str , other: str , expected: str ) -> None:
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_compare_random(hand: str , other: str , expected: str ) -> None:
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight() -> None:
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight() -> None:
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_file_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_file_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
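# --- Added illustrative sketch (not part of the original test file) ---
# The comparison rule these tests exercise, in isolation: a hand is assumed to
# reduce to a hand-type category plus ordered card values, compared
# lexicographically. `compare_hands_sketch` is a hypothetical illustration,
# not the PokerHand implementation.
def compare_hands_sketch(type_a: int, values_a: list[int], type_b: int, values_b: list[int]) -> str:
    if (type_a, values_a) > (type_b, values_b):
        return "Win"
    if (type_a, values_a) < (type_b, values_b):
        return "Loss"
    return "Tie"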
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
lowerCamelCase_ = 'speech_to_text_2'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
    '''simple docstring'''
    self.vocab_size = vocab_size
    self.d_model = d_model
    self.decoder_ffn_dim = decoder_ffn_dim
    self.decoder_layers = decoder_layers
    self.decoder_attention_heads = decoder_attention_heads
    self.dropout = dropout
    self.attention_dropout = attention_dropout
    self.activation_dropout = activation_dropout
    self.activation_function = activation_function
    self.init_std = init_std
    self.decoder_layerdrop = decoder_layerdrop
    self.use_cache = use_cache
    self.num_hidden_layers = decoder_layers
    self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
    self.max_target_positions = max_target_positions
    super().__init__(
        pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
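# --- Added illustrative usage (not part of the original module) ---
# Constructing the config with defaults; the `attribute_map` above aliases
# `hidden_size` -> `d_model` and `num_attention_heads` -> `decoder_attention_heads`.
# (`__SCREAMING_SNAKE_CASE` is the class name as it appears in this file.)
_demo_config = __SCREAMING_SNAKE_CASE()
assert _demo_config.hidden_size == _demo_config.d_model == 256
assert _demo_config.num_attention_heads == 4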
| 92 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def setUp( self ):
    '''simple docstring'''
    mod_file = inspect.getfile(accelerate.test_utils )
    self.test_file_path = os.path.sep.join(
        mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
    from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
    self.test_metrics = test_metrics
@require_cpu
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices.''' )
cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd , env=os.environ.copy() )
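# --- Added illustrative sketch (not part of the original test file) ---
# How `debug_launcher` is used above: it runs a callable under an emulated
# multi-process CPU setup. A self-contained toy target function:
def _toy_main() -> None:
    from accelerate import Accelerator
    accelerator = Accelerator()
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")
# debug_launcher(_toy_main, num_processes=2)  # uncomment to try it locally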
| 92 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.num_channels = num_channels
    self.num_stages = num_stages
    self.hidden_sizes = hidden_sizes
    self.depths = depths
    self.is_training = is_training
    self.use_labels = use_labels
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.num_labels = num_labels
    self.initializer_range = initializer_range
    self.out_features = out_features
    self.out_indices = out_indices
    self.scope = scope
def prepare_config_and_inputs( self ):
    '''simple docstring'''
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size] , self.num_labels )
    config = self.get_config()
    return config, pixel_values, labels
def get_config( self ):
    '''simple docstring'''
    return ConvNextVaConfig(
        num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def create_and_check_model( self , config , pixel_values , labels ):
    '''simple docstring'''
    model = ConvNextVaModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    # expected last hidden states: B, C, H // 32, W // 32
    self.parent.assertEqual(
        result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
    '''simple docstring'''
    model = ConvNextVaForImageClassification(config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values , labels=labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_backbone( self , config , pixel_values , labels ):
    '''simple docstring'''
    model = ConvNextVaBackbone(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    # verify hidden states
    self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
    self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
    # verify channels
    self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
    self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
    # verify backbone works with out_features=None
    config.out_features = None
    model = ConvNextVaBackbone(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    # verify feature maps
    self.parent.assertEqual(len(result.feature_maps ) , 1 )
    self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
    # verify channels
    self.parent.assertEqual(len(model.channels ) , 1 )
    self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def prepare_config_and_inputs_for_common( self ):
    '''simple docstring'''
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'''pixel_values''': pixel_values}
    return config, inputs_dict
def prepare_config_and_inputs_with_labels( self ):
    '''simple docstring'''
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
    return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp( self ):
    '''simple docstring'''
    self.model_tester = ConvNextVaModelTester(self )
    self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
    '''simple docstring'''
    return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
    return
for model_class in self.all_model_classes:
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
    config.return_dict = True
    if model_class.__name__ in [
        *get_values(MODEL_MAPPING_NAMES ),
        *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
    ]:
        continue
    model = model_class(config )
    model.to(torch_device )
    model.train()
    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
    loss = model(**inputs ).loss
    loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
    return
for model_class in self.all_model_classes:
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
    config.use_cache = False
    config.return_dict = True
    if (
        model_class.__name__
        in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
        or not model_class.supports_gradient_checkpointing
    ):
        continue
    model = model_class(config )
    model.to(torch_device )
    model.gradient_checkpointing_enable()
    model.train()
    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
    loss = model(**inputs ).loss
    loss.backward()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
    model = model_class(config )
    signature = inspect.signature(model.forward )
    # signature.parameters is an OrderedDict => so arg_names order is deterministic
    arg_names = [*signature.parameters.keys()]
    expected_arg_names = ['''pixel_values''']
    self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
    model = model_class(config )
    model.to(torch_device )
    model.eval()
    with torch.no_grad():
        outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
    hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
    expected_num_stages = self.model_tester.num_stages
    self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
    # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
    self.assertListEqual(
        list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
    inputs_dict['''output_hidden_states'''] = True
    check_hidden_states_output(inputs_dict , config , model_class )
    # check that output_hidden_states also work using config
    del inputs_dict["output_hidden_states"]
    config.output_hidden_states = True
    check_hidden_states_output(inputs_dict , config , model_class )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    model = ConvNextVaModel.from_pretrained(model_name )
    self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
@cached_property
def default_image_processor( self ):
    '''simple docstring'''
    return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
preprocessor = self.default_image_processor
image = prepare_img()
inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
    outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
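# --- Added illustrative usage (not part of the original tests) ---
# The same checkpoint the integration test uses, driven outside unittest.
def _classify(image_path: str) -> int:
    processor = AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' )
    model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' )
    inputs = processor(images=Image.open(image_path ) , return_tensors='''pt''' )
    with torch.no_grad():
        logits = model(**inputs ).logits
    return int(logits.argmax(-1 ) )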
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""CLIPFeatureExtractor"""]
UpperCamelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
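# --- Added illustrative note (not part of the original module) ---
# What the `_LazyModule` indirection buys: importing this package stays cheap,
# and the heavy torch/tf/flax submodules are only imported when an attribute
# is first accessed. A minimal sketch of the same pattern:
import importlib
class _LazyAttr:
    def __init__(self, module_name: str, attr: str) -> None:
        self._module_name, self._attr = module_name, attr
    def resolve(self):
        # the submodule is imported only here, on first use
        return getattr(importlib.import_module(self._module_name), self._attr)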
| 92 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if the regexes in qs match a window of the key tuple ks."""
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )),
(("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
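# --- Added illustrative usage (not part of the original module) ---
# Applying `set_partitions` to a toy parameter tree: the wte embedding matches
# the ("wte", "embedding") rule above and is sharded as P("mp", None).
import numpy as np
_toy_params = {"transformer": {"wte": {"embedding": np.zeros((4, 2))}}}
_toy_spec = set_partitions(_toy_params)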
| 92 | 1 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
visited = {}
cuts = []
count = 0
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
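# --- Added illustrative helper (not part of the original script) ---
# An independent way to check the invariant behind the answer (2 for this
# sample input): every recorded cut detaches a subtree of even size. The
# helper below recomputes subtree sizes without relying on `dfs`.
def _subtree_size(node: int, parent: int) -> int:
    return 1 + sum(_subtree_size(child, node) for child in tree[node] if child != parent)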
| 92 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
visited = {}
cuts = []
count = 0
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 92 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowerCAmelCase ( __magic_name__ : int ) -> Optional[Any]:
random.seed(__magic_name__ )
np.random.seed(__magic_name__ )
torch.manual_seed(__magic_name__ )
torch.cuda.manual_seed_all(__magic_name__ )
# ^^ safe to call this function even if cuda is not available
class __SCREAMING_SNAKE_CASE :
def __init__( self , parameters: Iterable[torch.nn.Parameter] , decay: float = 0.9999 , min_decay: float = 0.0 , update_after_step: int = 0 , use_ema_warmup: bool = False , inv_gamma: Union[float, int] = 1.0 , power: Union[float, int] = 2 / 3 , model_cls: Optional[Any] = None , model_config: Dict[str, Any] = None , **kwargs , ):
    '''simple docstring'''
    if isinstance(parameters , torch.nn.Module ):
        deprecation_message = (
            '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
            '''Please pass the parameters of the module instead.'''
        )
        deprecate(
            '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , deprecation_message , standard_warn=False , )
        parameters = parameters.parameters()
        # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
        use_ema_warmup = True
    if kwargs.get('''max_value''' , None ) is not None:
        deprecation_message = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
        deprecate('''max_value''' , '''1.0.0''' , deprecation_message , standard_warn=False )
        decay = kwargs['''max_value''']
    if kwargs.get('''min_value''' , None ) is not None:
        deprecation_message = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
        deprecate('''min_value''' , '''1.0.0''' , deprecation_message , standard_warn=False )
        min_decay = kwargs['''min_value''']
    parameters = list(parameters )
    self.shadow_params = [p.clone().detach() for p in parameters]
    if kwargs.get('''device''' , None ) is not None:
        deprecation_message = '''The `device` argument is deprecated. Please use `to` instead.'''
        deprecate('''device''' , '''1.0.0''' , deprecation_message , standard_warn=False )
        self.to(device=kwargs['''device'''] )
    self.temp_stored_params = None
    self.decay = decay
    self.min_decay = min_decay
    self.update_after_step = update_after_step
    self.use_ema_warmup = use_ema_warmup
    self.inv_gamma = inv_gamma
    self.power = power
    self.optimization_step = 0
    self.cur_decay_value = None # set in `step()`
    self.model_cls = model_cls
    self.model_config = model_config
@classmethod
def from_pretrained( cls , path , model_cls ):
    '''simple docstring'''
    _ , ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
    model = model_cls.from_pretrained(path )
    ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
    ema_model.load_state_dict(ema_kwargs )
    return ema_model
def save_pretrained( self , path ):
    '''simple docstring'''
    if self.model_cls is None:
        raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
    if self.model_config is None:
        raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
    model = self.model_cls.from_config(self.model_config )
    state_dict = self.state_dict()
    state_dict.pop('''shadow_params''' , None )
    model.register_to_config(**state_dict )
    self.copy_to(model.parameters() )
    model.save_pretrained(path )
def get_decay( self , optimization_step: int ):
    '''simple docstring'''
    step = max(0 , optimization_step - self.update_after_step - 1 )
    if step <= 0:
        return 0.0
    if self.use_ema_warmup:
        cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
    else:
        cur_decay_value = (1 + step) / (10 + step)
    cur_decay_value = min(cur_decay_value , self.decay )
    # make sure decay is not smaller than min_decay
    cur_decay_value = max(cur_decay_value , self.min_decay )
    return cur_decay_value
@torch.no_grad()
def step( self , parameters: Iterable[torch.nn.Parameter] ):
    '''simple docstring'''
    if isinstance(parameters , torch.nn.Module ):
        deprecation_message = (
            '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
            '''Please pass the parameters of the module instead.'''
        )
        deprecate(
            '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , deprecation_message , standard_warn=False , )
        parameters = parameters.parameters()
    parameters = list(parameters )
    self.optimization_step += 1
    # Compute the decay factor for the exponential moving average.
    decay = self.get_decay(self.optimization_step )
    self.cur_decay_value = decay
    one_minus_decay = 1 - decay
    context_manager = contextlib.nullcontext
    if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
        import deepspeed
    for s_param, param in zip(self.shadow_params , parameters ):
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
        with context_manager():
            if param.requires_grad:
                s_param.sub_(one_minus_decay * (s_param - param) )
            else:
                s_param.copy_(param )
def copy_to( self , parameters: Iterable[torch.nn.Parameter] ):
    '''simple docstring'''
    parameters = list(parameters )
    for s_param, param in zip(self.shadow_params , parameters ):
        param.data.copy_(s_param.to(param.device ).data )
def to( self , device=None , dtype=None ):
    '''simple docstring'''
    self.shadow_params = [
        p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
        for p in self.shadow_params
    ]
def state_dict( self ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def store( self , parameters: Iterable[torch.nn.Parameter] ):
    '''simple docstring'''
    self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
def restore( self , parameters: Iterable[torch.nn.Parameter] ):
    '''simple docstring'''
    if self.temp_stored_params is None:
        raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
    for c_param, param in zip(self.temp_stored_params , parameters ):
        param.data.copy_(c_param.data )
    # Better memory-wise.
    self.temp_stored_params = None
def load_state_dict( self , state_dict: dict ):
    '''simple docstring'''
    state_dict = copy.deepcopy(state_dict )
    self.decay = state_dict.get('''decay''' , self.decay )
    if self.decay < 0.0 or self.decay > 1.0:
        raise ValueError('''Decay must be between 0 and 1''' )
    self.min_decay = state_dict.get('''min_decay''' , self.min_decay )
    if not isinstance(self.min_decay , float ):
        raise ValueError('''Invalid min_decay''' )
    self.optimization_step = state_dict.get('''optimization_step''' , self.optimization_step )
    if not isinstance(self.optimization_step , int ):
        raise ValueError('''Invalid optimization_step''' )
    self.update_after_step = state_dict.get('''update_after_step''' , self.update_after_step )
    if not isinstance(self.update_after_step , int ):
        raise ValueError('''Invalid update_after_step''' )
    self.use_ema_warmup = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
    if not isinstance(self.use_ema_warmup , bool ):
        raise ValueError('''Invalid use_ema_warmup''' )
    self.inv_gamma = state_dict.get('''inv_gamma''' , self.inv_gamma )
    if not isinstance(self.inv_gamma , (float, int) ):
        raise ValueError('''Invalid inv_gamma''' )
    self.power = state_dict.get('''power''' , self.power )
    if not isinstance(self.power , (float, int) ):
        raise ValueError('''Invalid power''' )
    shadow_params = state_dict.get('''shadow_params''' , None )
    if shadow_params is not None:
        self.shadow_params = shadow_params
        if not isinstance(self.shadow_params , list ):
            raise ValueError('''shadow_params must be a list''' )
        if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
            raise ValueError('''shadow_params must all be Tensors''' )
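# --- Added illustrative usage (not part of the original module) ---
# Typical training-loop wiring for the EMA helper above, on a toy model.
# (`__SCREAMING_SNAKE_CASE` is the class name as it appears in this file.)
def _ema_demo() -> None:
    model = torch.nn.Linear(4, 4)
    ema = __SCREAMING_SNAKE_CASE(model.parameters(), decay=0.999)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    for _ in range(3):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.step(model.parameters())  # update the shadow (EMA) weights
    ema.copy_to(model.parameters())  # bake the EMA weights into the model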
| 92 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
def rename_key(key):
    regex = R'''\w+[.]\d+'''
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
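# --- Added illustrative check (not part of the original module) ---
# The linear-layer branch of `rename_key_and_reshape_tensor` in isolation: a
# PyTorch weight of shape (out, in) becomes a transposed Flax kernel (in, out).
import numpy
_key, _tensor = rename_key_and_reshape_tensor(("dense", "weight"), numpy.zeros((3, 5)), {("dense", "kernel"): None})
assert _key == ("dense", "kernel") and _tensor.shape == (5, 3)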
| 92 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int , sigma: int , theta: int , lambd: int , gamma: int , psi: int ) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
    kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
    out += filter2D(gray, CV_8UC3, kernel_aa)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
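# --- Added illustrative check (not part of the original script) ---
# Two quick properties of the kernel builder above: an even `ksize` is bumped
# to odd, and with psi=0 the response peaks at the kernel centre.
_k = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
assert _k.shape == (11, 11)
assert _k[5, 5] == _k.max() == 1.0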
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str , main_target: str ) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_a: str , parent_b: str ) -> tuple[str, str]:
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str , genes: list[str] ) -> str:
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select(parent_a: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic(target: str , genes: list[str] , debug: bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''''''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x: x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
target_str = (
    """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
genes_list = list(
    """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
    """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
generation , population , target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
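# --- Added illustrative smoke test (not part of the original script) ---
# The search converges quickly on a short target, which makes a handy check.
def _smoke_test() -> None:
    gen, pop, best = basic('''hello''' , list('''abcdefghijklmnopqrstuvwxyz ''' ) , debug=False )
    assert best == '''hello'''
    print(f'''converged in {gen} generations over {pop} candidates''' )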
| 92 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase_ = """src/diffusers"""
UpperCamelCase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase_ = spec.loader.load_module()
def _should_continue(line , indent ):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , line ) is not None
def find_code_in_diffusers(object_name ):
    parts = object_name.split('''.''' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'''{module}.py''' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
    with open(os.path.join(DIFFUSERS_PATH , f'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''''''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
UpperCamelCase_ = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
UpperCamelCase_ = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
UpperCamelCase_ = re.compile(r"""<FILL\s+[^>]*>""")
def get_indent(code ):
    lines = code.split('''\n''' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
    return ""
def blackify(code: str) -> str:
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f'''class Bla:\n{code}'''
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
return result[len('''class Bla:\n''' ) :] if has_indent else result
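# `is_copy_consistent` scans a file for `# Copied from` markers, re-derives the reference
# implementation, applies any `old->new` substitutions, normalizes both sides with black, and
# either records or (with `overwrite=True`) rewrites any block that drifted from its source.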
def is_copy_consistent(filename: str, overwrite: bool = False):
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f'''^{indent}# End copy''' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''''''.join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '''\n'''.join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'''Detected changes, rewriting {filename}.''' )
        with open(filename , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
            f.writelines(lines )
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , '''**/*.py''' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '''\n'''.join(diffs )
        raise Exception(
            '''Found the following copy inconsistencies:\n'''
            + diff
            + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
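# Programmatic usage sketch (the path below is hypothetical):
#   diffs = is_copy_consistent("src/diffusers/models/attention.py", overwrite=False)
#   assert diffs == [], diffs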
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCamelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 92 |
'''simple docstring'''
import datasets
UpperCamelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCamelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCamelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self , predictions , references ):
        return {"accuracy": simple_accuracy(predictions , references )}
| 92 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name ):
    if "emb" in name:
        name = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        name = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        name = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        name = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        name = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        name = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        name = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        name = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name
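# Fairseq fuses the query/key/value projections of each attention layer into a single
# `in_proj_weight` of shape (3 * hidden_size, hidden_size); HF expects separate q/k/v
# projections, so the fused tensor is split into three row-blocks below.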
def rename_state_dict(state_dict: OrderedDict, hidden_size: int ) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('''in_proj_weight''' , '''q_proj.weight''' )] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''k_proj.weight''' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''v_proj.weight''' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str ) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
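# For the "small" checkpoint this yields hidden_size=1024, 24 layers, 16 heads, and
# ffn_dim = 4 * 1024 = 4096, i.e. the usual 4x feed-forward expansion.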
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained('''t5-base''' )
    audio_encoder = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(unexpected_keys ) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('''t5-base''' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCamelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
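# Transfo-XL is a recurrent-memory transformer: every forward pass also returns `mems`, one
# (mem_len, batch_size, hidden_size) tensor per layer, which can be fed back into the next
# call. The shape assertions in the tester below check exactly that contract.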
class TFTransfoXLModelTester:
    def __init__(self, parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self ):
        input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)
    def set_seed(self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model(self, config, input_ids_a, input_ids_b, lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_a , mems_a = model(input_ids_a ).to_tuple()
        inputs = {'''input_ids''': input_ids_a, '''mems''': mems_a}
        hidden_states_a , mems_a = model(inputs ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_a, input_ids_b, lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_a , mems_a = model(input_ids_a ).to_tuple()
        inputs = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
        lm_logits_a , mems_a = model(inputs ).to_tuple()
        lm_logits_a , mems_a = model([input_ids_b, mems_a] ).to_tuple()
        inputs = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
        lm_logits_a , mems_a = model(inputs ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_a, input_ids_b, lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_a, lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_a}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFTransfoXLModel,
            '''text-classification''': TFTransfoXLForSequenceClassification,
            '''text-generation''': TFTransfoXLLMHeadModel,
            '''zero-shot''': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model(self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head(self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
    def test_model_from_pretrained(self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
    def test_dataset_conversion(self ):
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103(self ):
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 92 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
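# LoRA stores each tuned layer as a low-rank pair (up, down); merging adds
# alpha * (up @ down) onto the frozen base weight, i.e. W = W0 + alpha * B A. For 1x1 conv
# kernels of shape (out, in, 1, 1) the trailing singleton dims are squeezed before the
# matmul and unsqueezed again afterwards.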
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
UpperCamelCase_ = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 92 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
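# The tester below builds a deliberately tiny random ALBERT config and checks the output shape
# of every task head (pretraining, MLM, QA, classification, multiple choice) against the
# expected batch/sequence dimensions.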
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act='''gelu''', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''sentence_order_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding(self ):
        model = AlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 92 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
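# Each rule maps a window of parameter-path regexes to a PartitionSpec: '''mp''' shards that
# axis across the model-parallel mesh dimension, None leaves it replicated.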
def _match(qs, ks ):
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    def replace(key, val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
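# Usage sketch (the function name `set_partitions` and variable below follow the upstream Flax
# example): given a flax parameter tree `params`,
#   param_specs = set_partitions(params)
# returns a matching tree of PartitionSpecs ready for pjit sharding annotations.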
| 92 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
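# Nearest-neighbour resizing: destination pixel (i, j) copies source pixel
# (int(i * src_h / dst_h), int(j * src_w / dst_w)); no interpolation is performed.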
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int ):
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process(self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x(self, x: int ) -> int:
        return int(self.ratio_x * x )
    def get_y(self, y: int ) -> int:
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
waitKey(0)
destroyAllWindows()
| 92 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
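# ViViT consumes short video clips; the processor output `pixel_values` is checked below to have
# shape (batch_size, num_frames, num_channels, crop_height, crop_width) for PIL, numpy and
# torch inputs alike.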
class VivitImageProcessingTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = VivitImageProcessingTester(self )
@property
    def image_processor_dict(self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 92 |
'''simple docstring'''
from __future__ import annotations
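# Equivalent resistance (function names follow the upstream algorithm collection):
#   parallel: R_eq = 1 / (1/R1 + ... + 1/Rn), e.g. resistor_parallel([2, 2]) == 1.0
#   series:   R_eq = R1 + ... + Rn,           e.g. resistor_series([2, 3, 4]) == 9.0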
def resistor_parallel(resistors: list[float] ) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float] ) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
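# Circular convolution via a rotation matrix (class/method names follow the upstream algorithm
# collection): row i of `matrix` holds the second signal rotated right by i, so
# (matrix^T @ first_signal)[n] = sum_m x[m] * h[(n - m) mod N].
# For the default signals [2, 1, 2, -1] and [1, 2, 3, 4] this gives [10.0, 10.0, 6.0, 14.0].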
class CircularConvolution:
    def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 92 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str:
for attribute in key.split('''.''' ):
lowercase : Tuple =getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase : List[Any] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase : Any =value
elif weight_type == "weight_g":
lowercase : List[Any] =value
elif weight_type == "weight_v":
lowercase : Union[str, Any] =value
elif weight_type == "bias":
lowercase : Tuple =value
elif weight_type == "running_mean":
lowercase : Union[str, Any] =value
elif weight_type == "running_var":
lowercase : str =value
elif weight_type == "num_batches_tracked":
lowercase : Tuple =value
elif weight_type == "inv_freq":
lowercase : Optional[Any] =value
else:
lowercase : Tuple =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
lowercase : Optional[int] =[]
lowercase : Tuple =fairseq_model.state_dict()
lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase : Tuple =False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase : List[Any] =True
else:
for key, mapped_key in MAPPING.items():
lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Union[str, Any] =True
if "*" in mapped_key:
lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2]
lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ )
if "pos_bias_u" in name:
lowercase : Optional[Any] =None
elif "pos_bias_v" in name:
lowercase : Union[str, Any] =None
elif "weight_g" in name:
lowercase : Any ='''weight_g'''
elif "weight_v" in name:
lowercase : Tuple ='''weight_v'''
elif "bias" in name:
lowercase : Optional[int] ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[int] ='''weight'''
elif "running_mean" in name:
lowercase : Union[str, Any] ='''running_mean'''
elif "inv_freq" in name:
lowercase : Any ='''inv_freq'''
elif "running_var" in name:
lowercase : Tuple ='''running_var'''
elif "num_batches_tracked" in name:
lowercase : Dict ='''num_batches_tracked'''
else:
lowercase : str =None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int:
lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1]
lowercase : Any =name.split('''.''' )
lowercase : List[str] =int(items[0] )
lowercase : Union[str, Any] =int(items[1] )
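# type_id 0 -> conv weights/biases; type_id 2 -> the norm (group norm applies only to the first conv layer)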
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase : Union[str, Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase : Optional[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase : Optional[int] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase : str =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]:
if config_path is not None:
lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' )
else:
lowercase : Optional[int] =WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase : Dict ='''rotary'''
if is_finetuned:
if dict_path:
lowercase : Optional[Any] =Dictionary.load(__magic_name__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase : str =target_dict.pad_index
lowercase : Union[str, Any] =target_dict.bos_index
lowercase : Any =target_dict.eos_index
lowercase : Tuple =len(target_dict.symbols )
lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' )
if not os.path.isdir(__magic_name__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowercase : Dict =target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : str =0
lowercase : List[Any] =1
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__magic_name__ , __magic_name__ )
lowercase : List[str] =WavaVecaCTCTokenizer(
__magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , )
lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False
lowercase : str =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
processor.save_pretrained(__magic_name__ )
lowercase : str =WavaVecaConformerForCTC(__magic_name__ )
else:
lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ )
if is_finetuned:
lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' )
lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ )
lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
lowercase : List[Any] =model[0].eval()
recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
hf_wavavec.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 92 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : List[Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int ):
'''simple docstring'''
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 92 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowercase : int =float(embedding_dim // 2 )
lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment )
lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 )
# scale embeddings
lowercase : Tuple =scale * emb
if flip_sin_to_cos:
lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 )
else:
lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 )
lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ )
lowercase : Any =nn.silu(UpperCAmelCase__ )
lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = False
lowerCamelCase_ = 1
@nn.compact
def __call__( self : int , UpperCAmelCase__ : str ):
'''simple docstring'''
return get_sinusoidal_embeddings(
UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase_ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'esm'
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Any =vocab_size
lowercase : List[Any] =hidden_size
lowercase : Any =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : int =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =layer_norm_eps
lowercase : Union[str, Any] =position_embedding_type
lowercase : List[Any] =use_cache
lowercase : Dict =emb_layer_norm_before
lowercase : Optional[Any] =token_dropout
lowercase : Union[str, Any] =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
lowercase : Any =EsmFoldConfig()
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ )
lowercase : Union[str, Any] =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
lowercase : int =get_default_vocab_list()
else:
lowercase : Tuple =vocab_list
else:
lowercase : Union[str, Any] =None
lowercase : Dict =None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase__ ):
lowercase : Optional[Any] =self.esmfold_config.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase : str =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase__ ):
lowercase : int =TrunkConfig(**self.trunk )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =asdict(self )
lowercase : Union[str, Any] =self.trunk.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 48
lowerCamelCase_ = 10_24
lowerCamelCase_ = 1_28
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = False
lowerCamelCase_ = 4
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.structure_module is None:
lowercase : Any =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase__ ):
lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase : str =self.sequence_state_dim // self.sequence_head_width
lowercase : int =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =asdict(self )
lowercase : Any =self.structure_module.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 3_84
lowerCamelCase_ = 1_28
lowerCamelCase_ = 16
lowerCamelCase_ = 1_28
lowerCamelCase_ = 12
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 0.1
lowerCamelCase_ = 8
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 7
lowerCamelCase_ = 10
lowerCamelCase_ = 1E-8
lowerCamelCase_ = 1E5
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return asdict(self )
def _lowerCAmelCase ( ) -> Optional[int]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 92 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 100 ) -> int:
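# Project Euler 29: count the distinct terms a**b by collecting each power in a set and returning its size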
lowercase : Union[str, Any] =set()
lowercase : List[Any] =0
lowercase : List[Any] =n + 1 # maximum limit
for a in range(2 , __magic_name__ ):
for b in range(2 , __magic_name__ ):
lowercase : Union[str, Any] =a**b # calculates the current power
collect_powers.add(__magic_name__ ) # adds the result to the set
return len(__magic_name__ )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 92 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Any:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase : Optional[int] =0
# Doctest custom flag to ignore output.
UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""")
UpperCamelCase_ = doctest.OutputChecker
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_ = CustomOutputChecker
UpperCamelCase_ = HfDoctestModule
UpperCamelCase_ = HfDocTestParser
| 92 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCamelCase_ = """examples/"""
UpperCamelCase_ = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCamelCase_ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase_ = """README.md"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[Any] ) -> Optional[Any]:
with open(__magic_name__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase : List[Any] =f.read()
lowercase , lowercase : Union[str, Any] =REPLACE_PATTERNS[pattern]
lowercase : List[str] =replace.replace('''VERSION''' , __magic_name__ )
lowercase : Any =re_pattern.sub(__magic_name__ , __magic_name__ )
with open(__magic_name__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> Any:
for folder, directories, fnames in os.walk(__magic_name__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__magic_name__ , __magic_name__ ) , __magic_name__ , pattern='''examples''' )
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : List[Any]=False ) -> List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__magic_name__ , __magic_name__ , __magic_name__ )
if not patch:
update_version_in_examples(__magic_name__ )
def _lowerCAmelCase ( ) -> Optional[int]:
lowercase : str ='''🤗 Transformers currently provides the following architectures'''
lowercase : List[str] ='''1. Want to contribute a new model?'''
with open(__magic_name__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase : Tuple =f.readlines()
# Find the start of the list.
lowercase : Dict =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase : Any =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowercase : List[str] =lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__magic_name__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__magic_name__ )
def _lowerCAmelCase ( ) -> str:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowercase : Dict =f.read()
lowercase : Union[str, Any] =REPLACE_PATTERNS['''init'''][0].search(__magic_name__ ).groups()[0]
return packaging.version.parse(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple=False ) -> List[Any]:
lowercase : Tuple =get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowercase : Optional[Any] =default_version.base_version
elif patch:
lowercase : Any =f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowercase : Dict =f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowercase : List[str] =input(f'''Which version are you releasing? [{default_version}]''' )
if len(__magic_name__ ) == 0:
lowercase : List[str] =default_version
print(f'''Updating version to {version}.''' )
global_version_update(__magic_name__ , patch=__magic_name__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _lowerCAmelCase ( ) -> int:
lowercase : str =get_version()
lowercase : int =f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowercase : int =current_version.base_version
# Check with the user we got that right.
lowercase : Dict =input(f'''Which version are we developing now? [{dev_version}]''' )
if len(__magic_name__ ) == 0:
lowercase : List[str] =dev_version
print(f'''Updating version to {version}.''' )
global_version_update(__magic_name__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCamelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 92 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Union[str, Any] =do_rescale
lowercase : List[Any] =rescale_factor
lowercase : Tuple =do_pad
lowercase : List[str] =pad_size
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ )
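# symmetric-pad the bottom and right edges up to the next multiple of `size`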
lowercase : Tuple =(old_height // size + 1) * size - old_height
lowercase : Tuple =(old_width // size + 1) * size - old_width
return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase : int =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : int =do_pad if do_pad is not None else self.do_pad
lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_pad:
lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 92 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCamelCase_ = logging.get_logger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Tuple , **UpperCAmelCase__ : int ):
'''simple docstring'''
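# translate legacy `no_*` flags into their positive counterparts before delegating to the parent constructor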
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase : Optional[int] =deprecated_arg[3:]
setattr(self , UpperCAmelCase__ , not kwargs.pop(UpperCAmelCase__ ) )
logger.warning(
F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase : int =kwargs.pop('''torchscript''' , self.torchscript )
lowercase : List[str] =kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
lowercase : int =kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCAmelCase__ )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
lowerCamelCase_ = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
lowercase : List[Any] =torch.device('''cpu''' )
lowercase : int =0
elif is_torch_tpu_available():
lowercase : Optional[Any] =xm.xla_device()
lowercase : int =0
else:
lowercase : Optional[Any] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowercase : str =torch.cuda.device_count()
return device, n_gpu
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return self.n_gpu > 0
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
UpperCamelCase_ = """bert-base-cased"""
UpperCamelCase_ = """google/pegasus-xsum"""
UpperCamelCase_ = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
UpperCamelCase_ = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
UpperCamelCase_ = """patrickvonplaten/t5-tiny-random"""
UpperCamelCase_ = """sshleifer/bart-tiny-random"""
UpperCamelCase_ = """sshleifer/tiny-mbart"""
UpperCamelCase_ = """sshleifer/tiny-marian-en-de"""
def _lowerCAmelCase ( __magic_name__ : Path , __magic_name__ : list ) -> str:
lowercase : Any ='''\n'''.join(__magic_name__ )
Path(__magic_name__ ).open('''w''' ).writelines(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : int ) -> List[Any]:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__magic_name__ , f'''{split}.source''' ) , __magic_name__ )
_dump_articles(os.path.join(__magic_name__ , f'''{split}.target''' ) , __magic_name__ )
return tmp_dir
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : List[str] =AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowercase : Dict =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase : Dict =max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in ARTICLES )
lowercase : str =max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in SUMMARIES )
lowercase : List[str] =4
lowercase : str =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowercase , lowercase : Dict ='''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
lowercase : List[str] =SeqaSeqDataset(
UpperCAmelCase__ , data_dir=UpperCAmelCase__ , type_path='''train''' , max_source_length=UpperCAmelCase__ , max_target_length=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , )
lowercase : Optional[Any] =DataLoader(UpperCAmelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowercase : Tuple =shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowercase : int =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase : Optional[Any] =max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in ARTICLES )
lowercase : Dict =max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in SUMMARIES )
lowercase : Optional[int] =4
lowercase : str =LegacySeqaSeqDataset(
UpperCAmelCase__ , data_dir=UpperCAmelCase__ , type_path='''train''' , max_source_length=20 , max_target_length=UpperCAmelCase__ , )
lowercase : Optional[Any] =DataLoader(UpperCAmelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Tuple =AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
lowercase : List[str] =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowercase : Tuple =tmp_dir.joinpath('''train.source''' ).open().readlines()
lowercase : int =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCAmelCase__ , UpperCAmelCase__ , 128 , UpperCAmelCase__ )
lowercase : Union[str, Any] ={x.name for x in tmp_dir.iterdir()}
lowercase : Dict ={x.name for x in save_dir.iterdir()}
lowercase : str =save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCAmelCase__ ) < len(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCAmelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
lowercase , lowercase , lowercase : Union[str, Any] =self._get_dataset(max_len=64 )
lowercase : List[str] =64
lowercase : Dict =ds.make_dynamic_sampler(UpperCAmelCase__ , required_batch_size_multiple=UpperCAmelCase__ )
lowercase : Union[str, Any] =[len(UpperCAmelCase__ ) for x in batch_sampler]
assert len(set(UpperCAmelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) # no dropped or added examples
lowercase : Optional[int] =DataLoader(UpperCAmelCase__ , batch_sampler=UpperCAmelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowercase : List[Any] =[]
lowercase : Tuple =[]
for batch in data_loader:
lowercase : str =batch['''input_ids'''].shape
lowercase : List[Any] =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowercase : Any =np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(UpperCAmelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCAmelCase__ )
assert num_src_per_batch[0] == max(UpperCAmelCase__ )
if failures:
raise AssertionError(F'''too many tokens in {len(UpperCAmelCase__ )} batches''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase , lowercase : Optional[Any] =self._get_dataset(max_len=512 )
lowercase : Optional[Any] =2
lowercase : List[Any] =ds.make_sortish_sampler(UpperCAmelCase__ , shuffle=UpperCAmelCase__ )
lowercase : List[Any] =DataLoader(UpperCAmelCase__ , batch_size=UpperCAmelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowercase : List[str] =DataLoader(UpperCAmelCase__ , batch_size=UpperCAmelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCAmelCase__ )
lowercase : Any =tokenizer.pad_token_id
def count_pad_tokens(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]="input_ids" ):
return [batch[k].eq(UpperCAmelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(UpperCAmelCase__ , k='''labels''' ) ) < sum(count_pad_tokens(UpperCAmelCase__ , k='''labels''' ) )
assert sum(count_pad_tokens(UpperCAmelCase__ ) ) < sum(count_pad_tokens(UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any]=1000 , UpperCAmelCase__ : List[str]=128 ):
'''simple docstring'''
if os.getenv('''USE_REAL_DATA''' , UpperCAmelCase__ ):
lowercase : Any ='''examples/seq2seq/wmt_en_ro'''
lowercase : Any =max_len * 2 * 64
if not Path(UpperCAmelCase__ ).joinpath('''train.len''' ).exists():
save_len_file(UpperCAmelCase__ , UpperCAmelCase__ )
else:
lowercase : List[Any] ='''examples/seq2seq/test_data/wmt_en_ro'''
lowercase : Union[str, Any] =max_len * 4
save_len_file(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Dict =AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowercase : str =SeqaSeqDataset(
UpperCAmelCase__ , data_dir=UpperCAmelCase__ , type_path='''train''' , max_source_length=UpperCAmelCase__ , max_target_length=UpperCAmelCase__ , n_obs=UpperCAmelCase__ , )
return ds, max_tokens, tokenizer
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase , lowercase : int =self._get_dataset()
lowercase : Union[str, Any] =set(DistributedSortishSampler(UpperCAmelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCAmelCase__ ) )
lowercase : Tuple =set(DistributedSortishSampler(UpperCAmelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCAmelCase__ ) )
assert idsa.intersection(UpperCAmelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(UpperCAmelCase__ , use_fast=UpperCAmelCase__ )
if tok_name == MBART_TINY:
lowercase : str =SeqaSeqDataset(
UpperCAmelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
lowercase : Optional[int] =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowercase : str =SeqaSeqDataset(
UpperCAmelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
lowercase : Optional[Any] =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCAmelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCAmelCase__ ) == 0
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : MutableSequence[float] ):
'''simple docstring'''
if len(UpperCAmelCase__ ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowercase : list[float] =list(UpperCAmelCase__ )
lowercase : Union[str, Any] =degree
def __add__( self : Any , UpperCAmelCase__ : Polynomial ):
'''simple docstring'''
if self.degree > polynomial_a.degree:
lowercase : Optional[int] =self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , UpperCAmelCase__ )
else:
lowercase : Dict =polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , UpperCAmelCase__ )
def __sub__( self : List[str] , UpperCAmelCase__ : Polynomial ):
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Any ):
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Optional[int] , UpperCAmelCase__ : Polynomial ):
'''simple docstring'''
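# polynomial product: discrete convolution of the two coefficient lists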
lowercase : list[float] =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int | float ):
'''simple docstring'''
lowercase : int | float =0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(UpperCAmelCase__ )
return polynomial
def __repr__( self : Dict ):
'''simple docstring'''
return self.__str__()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
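# differentiate: the coefficient of x^(i+1) contributes (i + 1) * c_(i+1) to the x^i term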
lowercase : list[float] =[0] * self.degree
for i in range(self.degree ):
lowercase : Tuple =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int | float = 0 ):
'''simple docstring'''
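# integrate: c_i becomes c_i / (i + 1) on x^(i+1); index 0 stores the integration constant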
lowercase : list[float] =[0] * (self.degree + 2)
lowercase : str =constant
for i in range(self.degree + 1 ):
lowercase : Union[str, Any] =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , UpperCAmelCase__ )
def __eq__( self : str , UpperCAmelCase__ : object ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Tuple , UpperCAmelCase__ : object ):
'''simple docstring'''
return not self.__eq__(UpperCAmelCase__ )
| 92 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]:
return (preds == labels).mean()
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} )
lowerCamelCase_ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _lowerCAmelCase ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
# Set seed
set_seed(training_args.seed )
try:
lowercase : Any =processors[data_args.task_name]()
lowercase : Optional[int] =processor.get_labels()
lowercase : str =len(__magic_name__ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : List[str] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowercase : int =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Any =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase : int =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase : Union[str, Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict:
lowercase : Dict =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__magic_name__ , p.label_ids )}
# Data collator
lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase : Dict =Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase : Optional[Any] ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : List[Any] =trainer.evaluate()
lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__magic_name__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__magic_name__ )
return results
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 92 | 1 |
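# Editor's note: a hedged sketch of the metric plumbing in the trainer script above —
# take the argmax over the per-choice logits, then report mean exact-match accuracy.
# Function and argument names are illustrative, not the script's actual identifiers.
import numpy as np

def simple_accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
    return float((preds == labels).mean())

def choice_metrics(predictions: np.ndarray, label_ids: np.ndarray) -> dict:
    preds = np.argmax(predictions, axis=1)  # index of the highest-scoring choice
    return {"acc": simple_accuracy(preds, label_ids)}

print(choice_metrics(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])))  # {'acc': 1.0}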
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase_ = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
UpperCamelCase_ = dataset.iloc[:, 1:2].values
UpperCamelCase_ = dataset.iloc[:, 2].values
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase_ = PolynomialFeatures(degree=4)
UpperCamelCase_ = poly_reg.fit_transform(X)
UpperCamelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def _lowerCAmelCase ( ) -> str:
plt.scatter(__magic_name__ , __magic_name__ , color='''red''' )
plt.plot(__magic_name__ , pol_reg.predict(poly_reg.fit_transform(__magic_name__ ) ) , color='''blue''' )
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polynomial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 92 |
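# Editor's note: the same fit rewritten as a scikit-learn Pipeline so the feature
# expansion and the linear fit travel together. The toy data below is an assumption
# standing in for the position_salaries.csv columns.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(1, 11, dtype=float).reshape(-1, 1)  # stand-in for position levels
y = X.ravel() ** 3                                # stand-in salaries, clearly non-linear

model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
model.fit(X, y)
print(model.predict([[5.5]]))  # degree-4 features let a linear model track the curve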
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]:
lowercase : List[Any] =text.split(__magic_name__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )]
def _lowerCAmelCase ( __magic_name__ : dict ) -> dict:
lowercase , lowercase : int =[], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(__magic_name__ ):
titles.append(title if title is not None else '''''' )
texts.append(__magic_name__ )
return {"title": titles, "text": texts}
def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict:
lowercase : Dict =ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase : Tuple =load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ )
lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase : Optional[int] =Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
lowercase : Optional[Any] =dataset.map(
partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , batch_size=processing_args.batch_size , features=__magic_name__ , )
# And finally save your dataset
lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__magic_name__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ )
# And save the index
lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__magic_name__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowerCamelCase_ = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowerCamelCase_ = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowerCamelCase_ = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowerCamelCase_ = field(
default=1_28 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 92 | 1 |
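# Editor's note: a standalone sketch of the split_text chunking used above — break
# each document into roughly n-word passages before embedding them. Pure Python,
# with no assumptions beyond whitespace tokenization.
def split_text(text: str, n: int = 100, character: str = " ") -> list[str]:
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

print(split_text("one two three four five six", n=2))  # ['one two', 'three four', 'five six']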
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
UpperCamelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCamelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
UpperCamelCase_ = [file for file in filepaths if """ """ in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
UpperCamelCase_ = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
UpperCamelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
UpperCamelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 92 |
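# Editor's note: a compact restatement of the path-hygiene checks above — flag
# repository paths containing uppercase letters, spaces, hyphens, or no directory
# component. Stdlib-only sketch; the sample paths are invented.
import os

def bad_paths(filepaths: list[str]) -> dict[str, list[str]]:
    return {
        "uppercase": [f for f in filepaths if f != f.lower()],
        "space": [f for f in filepaths if " " in f],
        "hyphen": [f for f in filepaths if "-" in f],
        "no_directory": [f for f in filepaths if os.sep not in f],
    }

print(bad_paths(["src/good_name.py", "src/Bad Name.py", "top-level.py"]))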
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
UpperCamelCase_ = 128022
UpperCamelCase_ = 128028
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = MaMaaaTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : List[Any] =Path(self.tmpdirname )
save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple ='''</s>'''
lowercase : Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.get_tokenizer()
lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_tokenizer()
lowercase : str =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , )
lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , '''This is a test''' )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# fmt: off
lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = 'facebook/m2m100_418M'
lowerCamelCase_ = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase_ = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
lowercase : Optional[int] =1
return cls
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] ='''en'''
lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =tempfile.mkdtemp()
lowercase : Tuple =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] ='''en'''
lowercase : int ='''fr'''
lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
lowercase : str =shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase : int =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase : Union[str, Any] ='''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int ='''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase : Optional[Any] ='''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 92 | 1 |
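# Editor's note: a hedged sketch of what shift_tokens_right does for seq2seq
# training — build decoder inputs by rotating the labels one step right and placing
# the decoder start token first. This mirrors the common transformers helper, but
# the exact handling of -100 / eos varies per model, so treat it as an approximation.
import torch

def shift_right(labels: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)  # ignored positions become pad
    return shifted

print(shift_right(torch.tensor([[5, 6, 2]]), pad_token_id=1, decoder_start_token_id=2))
# tensor([[2, 5, 6]])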
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase_ = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase : Dict =torch.manual_seed(0 )
lowercase : Union[str, Any] =pipe.dual_guided(
prompt='''first prompt''' , image=UpperCAmelCase__ , text_to_image_strength=0.75 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
lowercase : Union[str, Any] =VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[Any] =generator.manual_seed(0 )
lowercase : Optional[Any] =pipe.dual_guided(
prompt='''first prompt''' , image=UpperCAmelCase__ , text_to_image_strength=0.75 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Any =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : int ='''cyberpunk 2077'''
lowercase : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase : Union[str, Any] =torch.manual_seed(0 )
lowercase : Optional[Any] =pipe.dual_guided(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.75 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowercase : List[Any] =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : int =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase : List[str] ='''A painting of a squirrel eating a burger '''
lowercase : str =torch.manual_seed(0 )
lowercase : Any =pipe.text_to_image(
prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase : str =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[Any] =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase : Dict =pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='''numpy''' ).images
lowercase : int =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[int] =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 92 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int:
try:
lowercase : Any =int(__magic_name__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =2
lowercase : Dict =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase : Union[str, Any] =i
while n % i == 0:
lowercase : Optional[int] =n // i
i += 1
return int(__magic_name__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 92 | 1 |
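# Editor's note: a readable version of the trial-division loop above — divide out
# each factor completely, so only primes are ever recorded, and the last divisor
# used is the largest prime factor.
def largest_prime_factor(n: int) -> int:
    factor = 2
    largest = 1
    while n > 1:
        while n % factor == 0:
            largest = factor
            n //= factor
        factor += 1
    return largest

assert largest_prime_factor(13195) == 29
assert largest_prime_factor(600851475143) == 6857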
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'microsoft/speecht5_tts'
lowerCamelCase_ = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
lowerCamelCase_ = 'text_reader'
lowerCamelCase_ = SpeechTaProcessor
lowerCamelCase_ = SpeechTaForTextToSpeech
lowerCamelCase_ = SpeechTaHifiGan
lowerCamelCase_ = ['text']
lowerCamelCase_ = ['audio']
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if self.post_processor is None:
lowercase : int ='''microsoft/speecht5_hifigan'''
super().setup()
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any]=None ):
'''simple docstring'''
lowercase : List[str] =self.pre_processor(text=UpperCAmelCase__ , return_tensors='''pt''' , truncation=UpperCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
lowercase : List[str] =load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
lowercase : Optional[Any] =torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(UpperCAmelCase__ ).cpu().detach()
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'speech_to_text_2'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : List[str] =vocab_size
lowercase : Optional[int] =d_model
lowercase : Optional[Any] =decoder_ffn_dim
lowercase : Any =decoder_layers
lowercase : Dict =decoder_attention_heads
lowercase : List[Any] =dropout
lowercase : List[Any] =attention_dropout
lowercase : Any =activation_dropout
lowercase : Optional[Any] =activation_function
lowercase : Optional[int] =init_std
lowercase : Dict =decoder_layerdrop
lowercase : Optional[int] =use_cache
lowercase : Optional[Any] =decoder_layers
lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase : str =max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]:
lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]:
lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 )
lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:]
lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str:
lowercase : Union[str, Any] =list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase : Dict =random.choice(__magic_name__ )
return "".join(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]:
lowercase : Any =[]
# Generate more children proportionally to the fitness score.
lowercase : Dict =int(parent_a[1] * 100 ) + 1
lowercase : List[str] =10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0]
lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside genes variable.
lowercase : Optional[int] =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__magic_name__ )
# Generate random starting population.
lowercase : int =[]
for _ in range(__magic_name__ ):
population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
# Just some logs to know what the algorithms is doing.
lowercase , lowercase : Optional[int] =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowercase : Any =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
lowercase : Dict =[
(item, score / len(__magic_name__ )) for item, score in population_score
]
# This is selection
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase_ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCamelCase_ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 92 |
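# Editor's note: the two core operators of the evolution loop above, de-obfuscated.
# Single-point crossover splices two parents at a random cut; mutation rewrites one
# gene with a fixed probability. A sketch, not tied to the exact constants above.
import random

def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    cut = random.randint(0, len(parent_a) - 1)
    return parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:]

def mutate(child: str, genes: list[str], probability: float = 0.4) -> str:
    symbols = list(child)
    if random.uniform(0, 1) < probability:
        symbols[random.randint(0, len(symbols) - 1)] = random.choice(genes)
    return "".join(symbols)

child_a, child_b = crossover("hello", "world")
print(mutate(child_a, genes=list("abcdefgh")))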
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : List[Any] =parent
lowercase : Tuple =batch_size
lowercase : List[str] =image_size
lowercase : List[Any] =num_channels
lowercase : Union[str, Any] =num_stages
lowercase : int =hidden_sizes
lowercase : Any =depths
lowercase : Tuple =is_training
lowercase : str =use_labels
lowercase : List[Any] =intermediate_size
lowercase : int =hidden_act
lowercase : Union[str, Any] =num_labels
lowercase : Optional[int] =initializer_range
lowercase : int =out_features
lowercase : List[str] =out_indices
lowercase : str =scope
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Dict =None
if self.use_labels:
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Dict =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[int] =model(UpperCAmelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase : Optional[Any] =None
lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : List[str] =config_and_inputs
lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaModelTester(self )
lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : Optional[int] =True
if model_class.__name__ in [
*get_values(UpperCAmelCase__ ),
*get_values(UpperCAmelCase__ ),
]:
continue
lowercase : Dict =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : List[Any] =model(**UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : List[Any] =False
lowercase : Any =True
if (
model_class.__name__
in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase : Any =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : int =model(**UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict =model_class(UpperCAmelCase__ )
lowercase : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : int =[*signature.parameters.keys()]
lowercase : Optional[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
lowercase : int =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : List[Any] =self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[str] =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Tuple =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ )
lowercase : int =self.default_image_processor
lowercase : List[str] =prepare_img()
lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase : Dict =model(**UpperCAmelCase__ )
# verify the logits
lowercase : Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCamelCase_ = TypeVar("""T""")
UpperCamelCase_ = TypeVar("""U""")
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
def __init__( self : List[str] , UpperCAmelCase__ : T | None , UpperCAmelCase__ : U | None ):
'''simple docstring'''
lowercase : List[str] =key
lowercase : Optional[Any] =val
lowercase : DoubleLinkedListNode[T, U] | None =None
lowercase : DoubleLinkedListNode[T, U] | None =None
def __repr__( self : Any ):
'''simple docstring'''
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
def __init__( self : int ):
'''simple docstring'''
lowercase : DoubleLinkedListNode[T, U] =DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : DoubleLinkedListNode[T, U] =DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase , lowercase : Any =self.rear, self.head
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any =['''DoubleLinkedList''']
lowercase : Union[str, Any] =self.head
while node.next is not None:
rep.append(str(UpperCAmelCase__ ) )
lowercase : Any =node.next
rep.append(str(self.rear ) )
return ",\n ".join(UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
lowercase : Optional[int] =self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowercase : Dict =node
lowercase : str =previous
lowercase : Optional[Any] =node
lowercase : Optional[int] =self.rear
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
lowercase : Any =node.next
lowercase : Optional[int] =node.prev
lowercase : Any =None
lowercase : int =None
return node
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
lowerCamelCase_ = {}
def __init__( self : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : DoubleLinkedList[T, U] =DoubleLinkedList()
lowercase : Union[str, Any] =capacity
lowercase : int =0
lowercase : List[Any] =0
lowercase : Optional[Any] =0
lowercase : dict[T, DoubleLinkedListNode[T, U]] ={}
def __repr__( self : List[str] ):
'''simple docstring'''
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self : Dict , UpperCAmelCase__ : T ):
'''simple docstring'''
return key in self.cache
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : T ):
'''simple docstring'''
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
lowercase : DoubleLinkedListNode[T, U] =self.cache[key]
lowercase : int =self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(UpperCAmelCase__ )
return node.val
self.miss += 1
return None
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : T , UpperCAmelCase__ : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowercase : Dict =self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(UpperCAmelCase__ ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
lowercase : Dict =DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowercase : Union[str, Any] =self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowercase : Any =value
self.list.add(UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] , UpperCAmelCase__ : int = 128 ):
'''simple docstring'''
def cache_decorator_inner(UpperCAmelCase__ : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*UpperCAmelCase__ : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowercase : Dict =LRUCache(UpperCAmelCase__ )
lowercase : Tuple =cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowercase : Union[str, Any] =func(*UpperCAmelCase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , UpperCAmelCase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(UpperCAmelCase__ , '''cache_info''' , UpperCAmelCase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
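# The snippet above builds an LRU cache from scratch (doubly linked list for
# recency order, dict for O(1) lookup). For comparison, a minimal sketch of
# the same eviction policy on top of collections.OrderedDict; the class name
# SimpleLRU and everything around it are illustrative, not from the snippet,
# though the None-on-miss behaviour mirrors it.
from collections import OrderedDict


class SimpleLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.data: OrderedDict = OrderedDict()  # keys kept in recency order

    def get(self, key):
        if key not in self.data:
            return None  # miss, like the snippet's pythonic-interface note
        self.data.move_to_end(key)  # mark as most recently used
        return self.data[key]

    def put(self, key, value) -> None:
        if key in self.data:
            self.data.move_to_end(key)
        self.data[key] = value
        if len(self.data) > self.capacity:
            self.data.popitem(last=False)  # evict the least recently used


# Usage: "a" survives because it was read after "b" was written.
cache = SimpleLRU(2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")
cache.put("c", 3)  # evicts "b"
assert cache.get("b") is None and cache.get("a") == 1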
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ = object()
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]:
lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ):
lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )]
if matches and all(__magic_name__ ):
return True
return False
def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]:
def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ):
for rule, replacement in rules:
if _match(__magic_name__ , __magic_name__ ):
return replacement
return val
return replace
def _lowerCAmelCase ( ) -> int:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )),
(("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _lowerCAmelCase ( __magic_name__ : str ) -> int:
lowercase : int =_get_partition_rules()
lowercase : Tuple =_replacement_rules(__magic_name__ )
lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )}
lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__magic_name__ ) )
| 92 | 1 |
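# The partition rules above are matched against flattened parameter paths:
# each rule is a tuple of regexes that must match a contiguous window of the
# key, and the first matching rule supplies the PartitionSpec. A standalone
# sketch of that matching logic (the helper name match_window is mine):
import re


def match_window(patterns: tuple, key: tuple) -> bool:
    compiled = [re.compile(p + "$") for p in patterns]  # anchored like _match
    for i in range(len(key) - len(compiled) + 1):
        window = key[i : i + len(compiled)]
        if all(rx.match(part) for rx, part in zip(compiled, window)):
            return True
    return False


key = ("transformer", "h", "3", "attention", "q_proj", "kernel")
assert match_window(("attention", "(q_proj|k_proj|v_proj)", "kernel"), key)
assert not match_window(("mlp", "c_fc", "kernel"), key)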
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
UpperCamelCase_ = {
"""google/pegasus-xsum""": 512,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = PegasusTokenizer
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Tuple , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]="<pad>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : Dict="<mask_2>" , UpperCAmelCase__ : str="<mask_1>" , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=103 , **UpperCAmelCase__ : int , ):
'''simple docstring'''
lowercase : Dict =offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(UpperCAmelCase__ )}, but is'''
F''' {type(UpperCAmelCase__ )}''' )
lowercase : List[Any] =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(UpperCAmelCase__ ) , self.offset - 1 )
]
if len(set(UpperCAmelCase__ ) ) != len(UpperCAmelCase__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase : Optional[int] =additional_special_tokens_extended
else:
lowercase : List[Any] =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , mask_token_sent=UpperCAmelCase__ , offset=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowercase : Tuple =vocab_file
lowercase : Optional[int] =False if not self.vocab_file else True
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List , UpperCAmelCase__ : Optional[List] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Tuple =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
| 92 |
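# The constructor above reserves `offset` token ids (103 by default) ahead of
# the real vocabulary: the sentence mask plus <unk_i> placeholders. A small
# sketch of the padding rule from its first branch (function name is mine):
def fill_unk_tokens(extra_tokens, mask_token_sent="<mask_1>", offset=103):
    tokens = list(extra_tokens)
    if mask_token_sent is not None and mask_token_sent not in tokens:
        tokens = [mask_token_sent] + tokens
    # pad with <unk_i> placeholders until offset - 1 names exist
    tokens += [f"<unk_{i}>" for i in range(len(tokens), offset - 1)]
    if len(set(tokens)) != len(tokens):
        raise ValueError("duplicate or incorrectly shifted <unk_x> tokens")
    return tokens


print(fill_unk_tokens([], offset=5))  # ['<mask_1>', '<unk_1>', '<unk_2>', '<unk_3>']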
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
lowercase : Optional[Any] =1
lowercase : Union[str, Any] =True
for v in tree[start]:
if v not in visited:
            ret += dfs(v )
if ret % 2 == 0:
cuts.append(__magic_name__ )
return ret
def _lowerCAmelCase ( ) -> int:
dfs(1 )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 10, 9
UpperCamelCase_ = defaultdict(list)
UpperCamelCase_ = {}
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 92 | 1 |
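# The snippet above is the classic "Even Tree" problem: count the edges that
# can be removed so every remaining component has an even number of nodes.
# The obfuscation dropped the visited bookkeeping, so here is a
# self-contained corrected sketch (same algorithm, local state):
from collections import defaultdict


def count_even_cuts(edges, root=1):
    tree = defaultdict(list)
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    cuts, visited = [], set()

    def dfs(start):
        visited.add(start)
        ret = 1  # count this node
        for v in tree[start]:
            if v not in visited:
                ret += dfs(v)  # recurse into the child, not the parent
        if ret % 2 == 0:
            cuts.append(start)  # an even subtree can be detached here
        return ret

    dfs(root)
    return len(cuts) - 1  # the root's own "cut" is not a real edge


edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
assert count_even_cuts(edges) == 2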
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'perceiver'
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int]=256 , UpperCAmelCase__ : List[str]=1280 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : List[Any]=26 , UpperCAmelCase__ : Dict=8 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Any="kv" , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=1E-12 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Union[str, Any]=262 , UpperCAmelCase__ : Optional[int]=2048 , UpperCAmelCase__ : Optional[int]=56 , UpperCAmelCase__ : List[str]=[368, 496] , UpperCAmelCase__ : Optional[int]=16 , UpperCAmelCase__ : List[str]=1920 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : List[Any]=[1, 16, 224, 224] , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Any =num_latents
lowercase : Optional[int] =d_latents
lowercase : Optional[Any] =d_model
lowercase : Dict =num_blocks
lowercase : Dict =num_self_attends_per_block
lowercase : List[str] =num_self_attention_heads
lowercase : Optional[int] =num_cross_attention_heads
lowercase : Tuple =qk_channels
lowercase : List[Any] =v_channels
lowercase : Dict =cross_attention_shape_for_attention
lowercase : Optional[int] =self_attention_widening_factor
lowercase : int =cross_attention_widening_factor
lowercase : Tuple =hidden_act
lowercase : Union[str, Any] =attention_probs_dropout_prob
lowercase : Optional[int] =initializer_range
lowercase : Union[str, Any] =layer_norm_eps
lowercase : List[str] =use_query_residual
# masked language modeling attributes
lowercase : Tuple =vocab_size
lowercase : str =max_position_embeddings
# image classification attributes
lowercase : str =image_size
# flow attributes
lowercase : Any =train_size
# multimodal autoencoding attributes
lowercase : List[str] =num_frames
lowercase : str =audio_samples_per_frame
lowercase : int =samples_per_patch
lowercase : Dict =output_shape
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Dict ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Any ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return 1E-4
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[TensorType] = None , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 40 , UpperCAmelCase__ : int = 40 , ):
'''simple docstring'''
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str =compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Optional[Any] =preprocessor.num_special_tokens_to_add(UpperCAmelCase__ )
lowercase : Optional[int] =compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Any =[''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Optional[Any] =dict(preprocessor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) )
lowercase : int =inputs.pop('''input_ids''' )
return inputs
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : Union[str, Any] =compute_effective_axis_dimension(UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : Dict =self._generate_dummy_images(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Union[str, Any] =dict(preprocessor(images=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) )
lowercase : int =inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 92 |
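# generate_dummy_inputs above swaps any dynamic axis (-1) for a small fixed
# size so ONNX tracing never specializes on the request. A sketch of that
# fallback rule (assumed to mirror the compute_effective_axis_dimension
# imported above; the local name is mine):
def effective_axis(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # non-positive means "dynamic axis"
        dimension = fixed_dimension
    # special tokens the tokenizer will add are subtracted up front, so the
    # final tensor still ends up with exactly `fixed_dimension` positions
    return dimension - num_token_to_add


assert effective_axis(-1, fixed_dimension=2) == 2  # batch axis
assert effective_axis(-1, fixed_dimension=8, num_token_to_add=2) == 6  # sequence axis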
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict:
lowercase : List[str] =R'''\w+[.]\d+'''
lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ )
for pat in pats:
lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) )
return key
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str:
lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase : str =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase : str =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase : str =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowercase : Optional[Any] =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]:
# Step 1: Convert pytorch tensor to numpy
lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) )
lowercase : Dict =flatten_dict(__magic_name__ )
lowercase : Dict ={}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase : Dict =rename_key(__magic_name__ )
lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase : Tuple =jnp.asarray(__magic_name__ )
return unflatten_dict(__magic_name__ )
| 92 | 1 |
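# The converter above has two layout-sensitive cases: conv kernels move from
# PyTorch's (out, in, h, w) to Flax's (h, w, in, out), and linear weights are
# transposed. A standalone numpy sketch of just those rules (name is mine):
import numpy as np


def to_flax_kernel(pt_key: tuple, pt_tensor: np.ndarray):
    if pt_key[-1] == "weight" and pt_tensor.ndim == 4:
        # conv layer: OIHW -> HWIO
        return pt_key[:-1] + ("kernel",), pt_tensor.transpose(2, 3, 1, 0)
    if pt_key[-1] == "weight" and pt_tensor.ndim == 2:
        # linear layer: (out, in) -> (in, out)
        return pt_key[:-1] + ("kernel",), pt_tensor.T
    return pt_key, pt_tensor  # biases and everything else pass through


key, kernel = to_flax_kernel(("conv1", "weight"), np.zeros((8, 3, 5, 5)))
assert key == ("conv1", "kernel") and kernel.shape == (5, 5, 3, 8)
key, kernel = to_flax_kernel(("fc", "weight"), np.zeros((10, 4)))
assert kernel.shape == (4, 10)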
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict ):
'''simple docstring'''
lowercase : Dict =''''''
lowercase : Tuple =''''''
lowercase : List[str] =[]
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
lowercase : List[Any] =self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
lowercase : Optional[int] =self.__min_dist_top_down_dp(UpperCAmelCase__ , n - 1 )
lowercase : Any =self.__min_dist_top_down_dp(m - 1 , UpperCAmelCase__ )
lowercase : List[str] =self.__min_dist_top_down_dp(m - 1 , n - 1 )
lowercase : Optional[Any] =1 + min(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return self.dp[m][n]
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : List[str] =worda
lowercase : List[Any] =worda
lowercase : List[str] =[[-1 for _ in range(len(UpperCAmelCase__ ) )] for _ in range(len(UpperCAmelCase__ ) )]
return self.__min_dist_top_down_dp(len(UpperCAmelCase__ ) - 1 , len(UpperCAmelCase__ ) - 1 )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : List[Any] =worda
lowercase : Any =worda
lowercase : str =len(UpperCAmelCase__ )
lowercase : List[Any] =len(UpperCAmelCase__ )
lowercase : int =[[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
lowercase : Any =j
elif j == 0: # second string is empty
lowercase : Tuple =i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
lowercase : Dict =self.dp[i - 1][j - 1]
else:
lowercase : List[str] =self.dp[i][j - 1]
lowercase : Any =self.dp[i - 1][j]
lowercase : int =self.dp[i - 1][j - 1]
lowercase : List[Any] =1 + min(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return self.dp[m][n]
if __name__ == "__main__":
UpperCamelCase_ = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
UpperCamelCase_ = input("""Enter the first string: """).strip()
UpperCamelCase_ = input("""Enter the second string: """).strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 92 |
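# Both methods above compute the Levenshtein distance; the bottom-up table
# only ever reads the previous row, so it can be collapsed to two rows. A
# compact sketch of that space optimization (not part of the snippet):
def edit_distance(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))  # row for the empty prefix of a
    for i, ca in enumerate(a, start=1):
        curr = [i]  # deleting i characters of a reaches the empty string
        for j, cb in enumerate(b, start=1):
            if ca == cb:
                curr.append(prev[j - 1])  # match: no edit needed
            else:
                #                  insert        delete   replace
                curr.append(1 + min(curr[j - 1], prev[j], prev[j - 1]))
        prev = curr
    return prev[-1]


assert edit_distance("kitten", "sitting") == 3
assert edit_distance("", "abc") == 3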
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]:
lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]:
lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 )
lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:]
lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str:
lowercase : Union[str, Any] =list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase : Dict =random.choice(__magic_name__ )
return "".join(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]:
lowercase : Any =[]
# Generate more children proportionally to the fitness score.
lowercase : Dict =int(parent_a[1] * 100 ) + 1
lowercase : List[str] =10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0]
lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside genes variable.
lowercase : Optional[int] =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__magic_name__ )
# Generate random starting population.
lowercase : int =[]
for _ in range(__magic_name__ ):
population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
# Just some logs to know what the algorithms is doing.
lowercase , lowercase : Optional[int] =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
        lowercase : int =sorted(__magic_name__ , key=lambda x : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowercase : Any =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
lowercase : Dict =[
(item, score / len(__magic_name__ )) for item, score in population_score
]
# This is selection
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase_ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCamelCase_ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 92 | 1 |
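# One full evolution step from the snippet above, shrunk to a toy target so
# it runs in milliseconds. The per-gene mutation is a simplification of the
# snippet's single-gene mutation; all constants here are illustrative.
import random

TARGET = "hi!"
GENES = "abcdefghijklmnopqrstuvwxyz !?"


def fitness(item: str) -> int:
    return sum(g == t for g, t in zip(item, TARGET))


def crossover(a: str, b: str) -> str:
    cut = random.randint(0, len(TARGET) - 1)  # single-point crossover
    return a[:cut] + b[cut:]


def mutate(item: str, rate: float = 0.4) -> str:
    return "".join(random.choice(GENES) if random.random() < rate else g for g in item)


random.seed(0)
population = ["".join(random.choice(GENES) for _ in TARGET) for _ in range(50)]
for generation in range(1, 1000):
    population.sort(key=fitness, reverse=True)
    if fitness(population[0]) == len(TARGET):
        break
    parents = population[:10]  # elitism: the fittest survive and breed
    population = parents + [
        mutate(crossover(*random.sample(parents, 2))) for _ in range(40)
    ]
print(generation, population[0])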
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
UpperCamelCase_ = random.Random()
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Tuple=1.0 , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=None ) -> Dict:
if rng is None:
lowercase : List[Any] =global_rng
lowercase : Any =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : str=2000 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : int=160 , UpperCAmelCase__ : Dict=8 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=4000 , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Union[str, Any]=True , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : Optional[Any] =batch_size
lowercase : Dict =min_seq_length
lowercase : Optional[Any] =max_seq_length
lowercase : Optional[Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase : Tuple =padding_value
lowercase : Dict =sampling_rate
lowercase : Union[str, Any] =return_attention_mask
lowercase : str =do_normalize
lowercase : List[Any] =feature_size
lowercase : List[str] =chunk_length
lowercase : Tuple =hop_length
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[str]=False ):
'''simple docstring'''
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowercase : List[str] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase : Any =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase : List[Any] =[np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = WhisperFeatureExtractor if is_speech_available() else None
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Tuple =WhisperFeatureExtractionTester(self )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =feat_extract_first.save_pretrained(UpperCAmelCase__ )[0]
check_json_file_has_correct_format(UpperCAmelCase__ )
lowercase : List[Any] =self.feature_extraction_class.from_pretrained(UpperCAmelCase__ )
lowercase : List[Any] =feat_extract_first.to_dict()
lowercase : Tuple =feat_extract_second.to_dict()
lowercase : Dict =feat_extract_first.mel_filters
lowercase : List[Any] =feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Dict =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Optional[int] =os.path.join(UpperCAmelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCAmelCase__ )
lowercase : Tuple =self.feature_extraction_class.from_json_file(UpperCAmelCase__ )
lowercase : Dict =feat_extract_first.to_dict()
lowercase : List[str] =feat_extract_second.to_dict()
lowercase : List[str] =feat_extract_first.mel_filters
lowercase : Dict =feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase : Optional[Any] =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase : int =[np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
lowercase : Dict =feature_extractor(UpperCAmelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowercase : str =feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
lowercase : Any =feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowercase : List[str] =feature_extractor(UpperCAmelCase__ , return_tensors='''np''' ).input_features
lowercase : str =feature_extractor(UpperCAmelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase : str =[floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase : Dict =np.asarray(UpperCAmelCase__ )
lowercase : Union[str, Any] =feature_extractor(UpperCAmelCase__ , return_tensors='''np''' ).input_features
lowercase : Optional[Any] =feature_extractor(UpperCAmelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test truncation required
lowercase : Optional[Any] =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowercase : List[Any] =[np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
lowercase : Union[str, Any] =[x[: feature_extractor.n_samples] for x in speech_inputs]
lowercase : List[Any] =[np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs_truncated]
lowercase : List[str] =feature_extractor(UpperCAmelCase__ , return_tensors='''np''' ).input_features
lowercase : List[Any] =feature_extractor(UpperCAmelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
lowercase : Tuple =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase : Tuple =np.random.rand(100 , 32 ).astype(np.floataa )
lowercase : Tuple =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase : Optional[int] =feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowercase : Dict =feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : List[Any] =load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowercase : Dict =ds.sort('''id''' ).select(range(UpperCAmelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# fmt: off
lowercase : List[Any] =torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
lowercase : Any =self._load_datasamples(1 )
lowercase : List[str] =WhisperFeatureExtractor()
lowercase : List[str] =feature_extractor(UpperCAmelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCAmelCase__ , atol=1E-4 ) )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase : Optional[int] =self._load_datasamples(1 )[0]
lowercase : Tuple =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
lowercase : Tuple =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCAmelCase__ )[0]
self.assertTrue(np.all(np.mean(UpperCAmelCase__ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase__ ) - 1 ) < 1E-3 ) )
| 92 |
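# The last test exercises zero_mean_unit_var_norm, which rescales raw audio
# so the log-mel features are insensitive to input gain. The essence of that
# normalization in one function (a sketch; eps guards silent inputs):
import numpy as np


def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + eps)


audio = np.random.rand(16_000) * 65_535  # badly scaled input, as in the test
normed = zero_mean_unit_var(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3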
'''simple docstring'''
import datasets
UpperCamelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCamelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCamelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
| 92 | 1 |
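# The whole metric reduces to simple_accuracy above: numpy's elementwise
# comparison yields a boolean array whose mean is the fraction correct.
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75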
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase_ = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
UpperCamelCase_ = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
UpperCamelCase_ = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , ):
'''simple docstring'''
lowercase : Optional[int] =len(references[0] )
if any(len(UpperCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowercase : Optional[int] =[[refs[i] for refs in references] for i in range(UpperCAmelCase__ )]
lowercase : str =TER(
normalized=UpperCAmelCase__ , no_punct=UpperCAmelCase__ , asian_support=UpperCAmelCase__ , case_sensitive=UpperCAmelCase__ , )
lowercase : Optional[Any] =sb_ter.corpus_score(UpperCAmelCase__ , UpperCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 92 |
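# As the description notes, this metric's input format is transposed relative
# to sacrebleu: each prediction carries its own list of references, so
# _compute regroups them into one stream per reference index before scoring.
# That transpose in isolation:
references = [  # one list of references per prediction
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
references_per_prediction = len(references[0])
transformed = [[refs[i] for refs in references] for i in range(references_per_prediction)]
assert transformed == [
    ["does this sentence match", "wHaT aBoUt ThIs SeNtEnCe?"],
    ["does this sentence match!?!", "wHaT aBoUt ThIs SeNtEnCe?"],
]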
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : Optional[int] =13
lowercase : Union[str, Any] =7
lowercase : str =30
lowercase : Optional[int] =self.seq_length + self.mem_len
lowercase : Dict =15
lowercase : List[str] =True
lowercase : Optional[int] =True
lowercase : Tuple =99
lowercase : str =[10, 50, 80]
lowercase : List[Any] =32
lowercase : Optional[int] =32
lowercase : int =4
lowercase : Any =8
lowercase : List[Any] =128
lowercase : List[str] =2
lowercase : Tuple =2
lowercase : int =None
lowercase : Optional[int] =1
lowercase : int =0
lowercase : List[str] =3
lowercase : str =self.vocab_size - 1
lowercase : Tuple =0.01
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_labels:
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ )
lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple()
lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ )
lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple()
lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple()
lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple()
lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs
lowercase : Union[str, Any] ={'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase_ = () if is_tf_available() else ()
lowerCamelCase_ = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =TFTransfoXLModelTester(self )
lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : int =[TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase : str =model_class(UpperCAmelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase : Union[str, Any] =model.get_output_embeddings()
assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer )
lowercase : Any =model.get_bias()
assert name is None
else:
lowercase : Optional[int] =model.get_output_embeddings()
assert x is None
lowercase : Optional[int] =model.get_bias()
assert name is None
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
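# Hedged aside, separate from the test above: with sampling disabled (the
# do_sample flag passed to generate above), decoding is greedy and therefore
# deterministic, which is what makes a token-for-token comparison against a
# frozen reference list a stable check. A minimal sketch of that pattern
# (assert_greedy_match is an illustrative name, not part of the suite):
def assert_greedy_match(generated_ids, expected_ids):
    # Both arguments are plain Python lists of token ids.
    assert list(generated_ids) == list(expected_ids), "greedy decoding drifted from the frozen reference"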
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
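# Hedged illustration, separate from the module above: a minimal sketch of the
# _LazyModule idea it relies on. Attribute names are mapped to the submodule
# that defines them, and that submodule is imported only on first access, so
# importing the package stays cheap until e.g. a model class is touched.
# (_TinyLazyModule and the toy import_structure below are illustrative names.)
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"modeling_opt": ["OPTModel"]} -> {"OPTModel": "modeling_opt"}
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_submodule[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)  # the heavy import happens here, not at package import time
# usage sketch: sys.modules[__name__] = _TinyLazyModule(__name__, {"modeling_opt": ["OPTModel"]})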
| 92 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ):
'''simple docstring'''
lowercase : str =parent
lowercase : int =batch_size
lowercase : Any =seq_length
lowercase : int =is_training
lowercase : str =use_input_mask
lowercase : int =use_token_type_ids
lowercase : Dict =use_labels
lowercase : int =vocab_size
lowercase : str =embedding_size
lowercase : Union[str, Any] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_hidden_groups
lowercase : Union[str, Any] =num_attention_heads
lowercase : Any =intermediate_size
lowercase : Tuple =hidden_act
lowercase : Optional[int] =hidden_dropout_prob
lowercase : Union[str, Any] =attention_probs_dropout_prob
lowercase : List[Any] =max_position_embeddings
lowercase : int =type_vocab_size
lowercase : int =type_sequence_label_size
lowercase : Any =initializer_range
lowercase : List[Any] =num_labels
lowercase : int =num_choices
lowercase : Optional[int] =scope
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[int] =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Dict =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Tuple =None
lowercase : Any =None
lowercase : Dict =None
if self.use_labels:
lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Any =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : int =AlbertModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Optional[int] =self.num_choices
lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
# bind the tuple members that are used to build the inputs dict below
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels =config_and_inputs
inputs_dict ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ):
'''simple docstring'''
lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowercase : Any =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ )
lowercase : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =AlbertModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Tuple =type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' )
lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
lowercase : int =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowercase : Union[str, Any] =torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
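# Hedged aside, separate from the suite above: the integration test tolerates
# tiny numerical drift via torch.allclose with an absolute tolerance instead of
# exact equality. A self-contained sketch with made-up values
# (_atol_sketch is an illustrative name, not part of the suite):
if is_torch_available():
    def _atol_sketch():
        a = torch.tensor([-0.6513, 1.5035, -0.2766])
        assert torch.allclose(a, a + 5e-5, atol=1E-4)       # drift within atol passes
        assert not torch.allclose(a, a + 5e-4, atol=1E-4)   # drift beyond atol fails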
| 92 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=None ):
'''simple docstring'''
self.set_matricies(red=UpperCAmelCase__ , green=UpperCAmelCase__ , blue=UpperCAmelCase__ , red_edge=UpperCAmelCase__ , nir=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=None ):
'''simple docstring'''
if red is not None:
lowercase : int =red
if green is not None:
lowercase : int =green
if blue is not None:
lowercase : Tuple =blue
if red_edge is not None:
lowercase : Union[str, Any] =red_edge
if nir is not None:
lowercase : Optional[int] =nir
return True
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int="" , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Union[str, Any]=None ):
'''simple docstring'''
self.set_matricies(red=UpperCAmelCase__ , green=UpperCAmelCase__ , blue=UpperCAmelCase__ , red_edge=UpperCAmelCase__ , nir=UpperCAmelCase__ )
lowercase : int ={
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any]=0.08 , UpperCAmelCase__ : Tuple=1.22 , UpperCAmelCase__ : List[str]=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return (self.nir / self.green) - 1
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.nir - self.green
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any]=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.nir / self.red
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowercase : Union[str, Any] =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.nir / self.red
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
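# Hedged usage sketch, independent of the class above (the band values are
# hypothetical): NDVI, the most common index here, computed directly from its
# formula (nir - red) / (nir + red), matching the method registered as '''NDVI'''.
_red = np.array([[50.0, 60.0], [70.0, 80.0]])
_nir = np.array([[200.0, 180.0], [160.0, 140.0]])
_ndvi = (_nir - _red) / (_nir + _red)  # values in [-1, 1]; higher means denser vegetation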
| 92 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
'''simple docstring'''
if dst_width <= 0 or dst_height <= 0:
raise ValueError('''Destination width/height should be > 0''' )
lowercase : Union[str, Any] =img
lowercase : Union[str, Any] =img.shape[1]
lowercase : str =img.shape[0]
lowercase : Union[str, Any] =dst_width
lowercase : str =dst_height
lowercase : str =self.src_w / self.dst_w
lowercase : Optional[Any] =self.src_h / self.dst_h
lowercase : int =(
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )]
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
return int(self.ratio_x * x )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
return int(self.ratio_y * y )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 800, 600
UpperCamelCase_ = imread("""image_data/lena.jpg""", 1)
UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
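# Hedged aside: the same nearest-neighbour mapping as the loops above, written
# with numpy fancy indexing (resize_nn is an illustrative name, not part of the
# class). Each destination pixel (i, j) copies source pixel
# (int(i * src_h / dst_h), int(j * src_w / dst_w)), exactly as get_y/get_x do.
def resize_nn(img, dst_w, dst_h):
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]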
| 92 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : str=16 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=0 , ):
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Dict =batch_size
lowercase : List[Any] =seq_length
lowercase : Optional[Any] =is_training
lowercase : Tuple =use_input_mask
lowercase : Dict =use_token_type_ids
lowercase : Any =use_labels
lowercase : List[Any] =vocab_size
lowercase : int =hidden_size
lowercase : List[Any] =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : Optional[Any] =intermediate_size
lowercase : str =hidden_act
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : Any =attention_probs_dropout_prob
lowercase : List[Any] =max_position_embeddings
lowercase : Dict =type_vocab_size
lowercase : List[str] =type_sequence_label_size
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =num_labels
lowercase : Any =num_choices
lowercase : Dict =scope
lowercase : Any =projection_dim
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Dict =None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[Any] =None
if self.use_token_type_ids:
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Union[str, Any] =None
lowercase : Optional[Any] =None
lowercase : Optional[int] =None
if self.use_labels:
lowercase : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : int =BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
lowercase : List[Any] =DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =TFDPRContextEncoder(config=UpperCAmelCase__ )
lowercase : List[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : str =TFDPRQuestionEncoder(config=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Optional[Any] =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Any =model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =TFDPRReader(config=UpperCAmelCase__ )
lowercase : Tuple =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
# bind the tuple members that are used to build the inputs dict below
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels =config_and_inputs
inputs_dict ={'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCamelCase_ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =TFDPRModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Tuple =TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] =TFDPRReader.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowercase : Any =tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowercase : int =model(UpperCAmelCase__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase : Any =tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
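# Hedged aside, separate from the tests above: DPR ranks passages by the dot
# product between question and passage embeddings, which is why the integration
# test checks the raw pooled output values. A self-contained sketch with
# synthetic 768-dimensional vectors (_dpr_scoring_sketch is an illustrative name):
if is_tf_available():
    def _dpr_scoring_sketch():
        rng = numpy.random.default_rng(0)
        question = rng.normal(size=(1, 768))   # one pooled question embedding
        passages = rng.normal(size=(5, 768))   # five pooled passage embeddings
        scores = question @ passages.T         # shape (1, 5): one relevance score per passage
        return int(scores.argmax())            # index of the highest-scoring passage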
| 92 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Any =0.0_0
lowercase : Tuple =0
for resistor in resistors:
if resistor <= 0:
lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__magic_name__ )
first_sum += 1 / float(__magic_name__ )
index += 1
return 1 / first_sum
def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float:
lowercase : Optional[Any] =0.0_0
lowercase : int =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase : Tuple =f'''Resistor at index {index} has a negative value!'''
raise ValueError(__magic_name__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
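# Hedged usage sketch with hypothetical values: three 6-ohm resistors combine
# to 1 / (1/6 + 1/6 + 1/6) = 2.0 ohms in parallel and 6 + 6 + 6 = 18.0 ohms in
# series (_demo_resistors is an illustrative helper, not part of the module).
def _demo_resistors():
    demo = [6.0, 6.0, 6.0]
    parallel = 1 / sum(1 / r for r in demo)   # -> 2.0
    series = sum(demo)                        # -> 18.0
    return parallel, series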
| 92 | 1 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """prophetnet.tokenizer"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
UpperCamelCase_ = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
UpperCamelCase_ = {
"""microsoft/xprophetnet-large-wiki100-cased""": 512,
}
def _lowerCAmelCase ( __magic_name__ : Any ) -> str:
lowercase : Any =collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str =reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : str =token.rstrip('''\n''' )
lowercase : Dict =index
return vocab
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict="[SEP]" , UpperCAmelCase__ : Optional[Any]="[SEP]" , UpperCAmelCase__ : List[str]="[SEP]" , UpperCAmelCase__ : Optional[int]="[UNK]" , UpperCAmelCase__ : Tuple="[PAD]" , UpperCAmelCase__ : Optional[int]="[CLS]" , UpperCAmelCase__ : Union[str, Any]="[MASK]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
lowercase : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : Any =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowercase : Tuple ={'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
lowercase : Any =F'''[unused{i}]'''
lowercase : List[Any] =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab (a standalone sketch of this offset rule follows the class)
lowercase : Dict =12
lowercase : List[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase__ )
def __getstate__( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.__dict__.copy()
lowercase : str =None
return state
def __setstate__( self : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : List[str] ={}
lowercase : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return ([0] * len(UpperCAmelCase__ )) + [1]
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : str =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : Dict =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Tuple =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Tuple =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Any =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowercase : Any =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
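# Hedged illustration of the fairseq-offset scheme documented inside the class
# above: sentencepiece ids are shifted by a fixed offset of 12 so that ids 0-4
# stay reserved for [PAD]/[CLS]/[SEP]/[UNK]/[MASK] and ids 5-14 for the [unused]
# slots, while spm id 0 (its <unk>) maps to [UNK] = 3. A self-contained sketch
# of that rule (_FAIRSEQ_OFFSET and _spm_id_to_vocab_id are illustrative names):
_FAIRSEQ_OFFSET = 12
def _spm_id_to_vocab_id(spm_id):
    # spm id 3 (the first real piece, ",") lands at embedding position 15
    return spm_id + _FAIRSEQ_OFFSET if spm_id else 3  # 3 is the [UNK] id above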
| 92 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str:
for attribute in key.split('''.''' ):
lowercase : Tuple =getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase : List[Any] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase : Any =value
elif weight_type == "weight_g":
lowercase : List[Any] =value
elif weight_type == "weight_v":
lowercase : Union[str, Any] =value
elif weight_type == "bias":
lowercase : Tuple =value
elif weight_type == "running_mean":
lowercase : Union[str, Any] =value
elif weight_type == "running_var":
lowercase : str =value
elif weight_type == "num_batches_tracked":
lowercase : Tuple =value
elif weight_type == "inv_freq":
lowercase : Optional[Any] =value
else:
lowercase : Tuple =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
lowercase : Optional[int] =[]
lowercase : Tuple =fairseq_model.state_dict()
lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase : Tuple =False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase : List[Any] =True
else:
for key, mapped_key in MAPPING.items():
lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Union[str, Any] =True
if "*" in mapped_key:
lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2]
lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ )
if "pos_bias_u" in name:
lowercase : Optional[Any] =None
elif "pos_bias_v" in name:
lowercase : Union[str, Any] =None
elif "weight_g" in name:
lowercase : Any ='''weight_g'''
elif "weight_v" in name:
lowercase : Tuple ='''weight_v'''
elif "bias" in name:
lowercase : Optional[int] ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[int] ='''weight'''
elif "running_mean" in name:
lowercase : Union[str, Any] ='''running_mean'''
elif "inv_freq" in name:
lowercase : Any ='''inv_freq'''
elif "running_var" in name:
lowercase : Tuple ='''running_var'''
elif "num_batches_tracked" in name:
lowercase : Dict ='''num_batches_tracked'''
else:
lowercase : str =None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int:
lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1]
lowercase : Any =name.split('''.''' )
lowercase : List[str] =int(items[0] )
lowercase : Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase : Union[str, Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase : Optional[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase : Optional[int] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase : str =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]:
if config_path is not None:
lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' )
else:
lowercase : Optional[int] =WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase : Dict ='''rotary'''
if is_finetuned:
if dict_path:
lowercase : Optional[Any] =Dictionary.load(__magic_name__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase : str =target_dict.pad_index
lowercase : Union[str, Any] =target_dict.bos_index
lowercase : Any =target_dict.eos_index
lowercase : Tuple =len(target_dict.symbols )
lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' )
if not os.path.isdir(__magic_name__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowercase : Dict =target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : str =0
lowercase : List[Any] =1
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__magic_name__ , __magic_name__ )
lowercase : List[str] =WavaVecaCTCTokenizer(
__magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , )
lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False
lowercase : str =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
processor.save_pretrained(__magic_name__ )
lowercase : str =WavaVecaConformerForCTC(__magic_name__ )
else:
lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ )
if is_finetuned:
lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' )
lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ )
lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
lowercase : List[Any] =model[0].eval()
recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
hf_wavavec.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
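# Hedged aside: the "*" in the MAPPING values above is a per-layer wildcard; the
# layer index is recovered from the fairseq parameter name and substituted in,
# as recursively_load_weights does. A self-contained sketch of that substitution
# (the parameter name below is made up):
_name = "w2v_model.encoder.layers.7.self_attn.linear_q.weight"
_key = "self_attn.linear_q"
_mapped = "encoder.layers.*.self_attn.linear_q"
_layer = _name.split(_key)[0].split(".")[-2]   # -> "7"
_hf_key = _mapped.replace("*", _layer)         # -> "encoder.layers.7.self_attn.linear_q"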
| 92 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : list[list] ) -> list[list]:
lowercase : Dict =current_set.copy()
for row_index, row in enumerate(__magic_name__ ):
lowercase : Union[str, Any] =row[0]
for column_index, column in enumerate(__magic_name__ ):
if magnitude == 0:
lowercase : str =column
continue
lowercase : Any =column / magnitude
# Subtract to cancel term
lowercase : str =current_set[0]
lowercase : int =[first_row]
lowercase : List[str] =current_set[1::]
for row in current_set:
lowercase : Optional[Any] =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__magic_name__ )
continue
for column_index in range(len(__magic_name__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__magic_name__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase : Optional[int] =final_set[0]
lowercase : List[Any] =[]
lowercase : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase : Optional[int] =simplify(__magic_name__ )
for i in range(len(__magic_name__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __magic_name__ )
lowercase : List[Any] =resultant
return final_set
def _lowerCAmelCase ( __magic_name__ : list[list] ) -> list:
if len(__magic_name__ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
lowercase : Optional[int] =len(__magic_name__ ) + 1
if any(len(__magic_name__ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(__magic_name__ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(__magic_name__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase : List[str] =equations.copy()
if any(0 in row for row in data_set ):
lowercase : List[str] =data_set.copy()
lowercase : str =[]
for row_index, row in enumerate(__magic_name__ ):
if 0 not in row:
lowercase : Any =data_set.pop(__magic_name__ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , __magic_name__ )
lowercase : Dict =data_set.copy()
lowercase : List[Any] =simplify(__magic_name__ )
lowercase : Dict =simplified[::-1]
lowercase : list =[]
for row in simplified:
lowercase : int =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase : Union[str, Any] =row.copy()[: len(__magic_name__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__magic_name__ ) == 0:
solutions.append(0 )
continue
lowercase : str =temp_row[1::]
lowercase : Union[str, Any] =temp_row[::-1]
for column_index, column in enumerate(__magic_name__ ):
current_solution -= column * solutions[column_index]
solutions.append(__magic_name__ )
lowercase : int =[]
for item in solutions:
final.append(float(round(__magic_name__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
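# A minimal de-obfuscated cross-check of the elimination routine above, using
# numpy.linalg.solve as an independent reference; names here are illustrative.
import numpy as np

def solve_reference(equations: list[list[float]]) -> list[float]:
    # Each row is [a1, ..., an, b] for a1*x1 + ... + an*xn = b.
    a = np.array([row[:-1] for row in equations], dtype=float)
    b = np.array([row[-1] for row in equations], dtype=float)
    return [float(round(x, 5)) for x in np.linalg.solve(a, b)]

# For the 5x5 demo above this should agree with the routine and yield
# [-1.0, 0.0, 1.0, 2.0, 3.0]; for [[4, 2]] it yields [0.5].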
| 92 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowercase : int =float(embedding_dim // 2 )
lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment )
lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 )
# scale embeddings
lowercase : Tuple =scale * emb
if flip_sin_to_cos:
lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 )
else:
lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 )
lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ )
lowercase : Any =nn.silu(UpperCAmelCase__ )
lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = False
lowerCamelCase_ = 1
@nn.compact
def __call__( self : int , UpperCAmelCase__ : str ):
'''simple docstring'''
return get_sinusoidal_embeddings(
UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
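# A minimal NumPy sketch of the sinusoidal-embedding math above (assuming
# freq_shift=0, min_timescale=1, no sin/cos flip): half the dimensions carry
# sin, half cos, with log-spaced frequencies. Names are illustrative only.
import numpy as np

def sinusoidal_embeddings(timesteps: np.ndarray, dim: int, max_period: float = 1.0e4) -> np.ndarray:
    half = dim // 2
    freqs = np.exp(-np.log(max_period) * np.arange(half) / half)  # (half,)
    args = timesteps[:, None] * freqs[None, :]                    # (batch, half)
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)   # (batch, dim)

emb = sinusoidal_embeddings(np.array([0.0, 10.0, 100.0]), dim=8)
assert emb.shape == (3, 8)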
| 92 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224}
lowercase : List[str] =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowercase : Dict =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Tuple =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ , param_name='''crop_size''' )
lowercase : Dict =do_resize
lowercase : Tuple =size
lowercase : List[Any] =resample
lowercase : Any =do_center_crop
lowercase : Optional[Any] =crop_size
lowercase : List[Any] =do_rescale
lowercase : List[str] =rescale_factor
lowercase : List[Any] =do_normalize
lowercase : Tuple =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase : Optional[int] =image_std if image_std is not None else OPENAI_CLIP_STD
lowercase : List[Any] =do_convert_rgb
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
lowercase : Any =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase : List[str] =get_resize_output_image_size(UpperCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : List[str] =get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : Union[str, Any] =do_resize if do_resize is not None else self.do_resize
lowercase : Optional[int] =size if size is not None else self.size
lowercase : List[str] =get_size_dict(UpperCAmelCase__ , param_name='''size''' , default_to_square=UpperCAmelCase__ )
lowercase : int =resample if resample is not None else self.resample
lowercase : Optional[int] =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Optional[Any] =crop_size if crop_size is not None else self.crop_size
lowercase : int =get_size_dict(UpperCAmelCase__ , param_name='''crop_size''' , default_to_square=UpperCAmelCase__ )
lowercase : Dict =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : Union[str, Any] =image_mean if image_mean is not None else self.image_mean
lowercase : List[str] =image_std if image_std is not None else self.image_std
lowercase : Dict =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase : Tuple =[convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase : Any =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
lowercase : Dict =[self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
lowercase : Optional[int] =[self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : int =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
lowercase : Any =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
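# A hedged usage sketch: the class above appears to be a renamed copy of what
# ships upstream as transformers.CLIPImageProcessor (same defaults: shortest
# edge 224, OPENAI_CLIP mean/std), so the equivalent resize -> crop -> rescale
# -> normalize pipeline can be exercised as follows.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults mirror the values above
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype("uint8"))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)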
| 92 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase_ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'esm'
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Any =vocab_size
lowercase : List[Any] =hidden_size
lowercase : Any =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : int =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =layer_norm_eps
lowercase : Union[str, Any] =position_embedding_type
lowercase : List[Any] =use_cache
lowercase : Dict =emb_layer_norm_before
lowercase : Optional[Any] =token_dropout
lowercase : Union[str, Any] =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
lowercase : Any =EsmFoldConfig()
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ )
lowercase : Union[str, Any] =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
lowercase : int =get_default_vocab_list()
else:
lowercase : Tuple =vocab_list
else:
lowercase : Union[str, Any] =None
lowercase : Dict =None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase__ ):
lowercase : Optional[Any] =self.esmfold_config.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase : str =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase__ ):
lowercase : int =TrunkConfig(**self.trunk )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =asdict(self )
lowercase : Union[str, Any] =self.trunk.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 48
lowerCamelCase_ = 10_24
lowerCamelCase_ = 1_28
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = False
lowerCamelCase_ = 4
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.structure_module is None:
lowercase : Any =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase__ ):
lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase : str =self.sequence_state_dim // self.sequence_head_width
lowercase : int =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =asdict(self )
lowercase : Any =self.structure_module.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 3_84
lowerCamelCase_ = 1_28
lowerCamelCase_ = 16
lowerCamelCase_ = 1_28
lowerCamelCase_ = 12
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 0.1
lowerCamelCase_ = 8
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 7
lowerCamelCase_ = 10
lowerCamelCase_ = 1E-8
lowerCamelCase_ = 1E5
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return asdict(self )
def _lowerCAmelCase ( ) -> Optional[int]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
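# A hedged usage sketch: the configuration class above mirrors what ships
# upstream as transformers.EsmConfig, so an equivalent (non-folding) config can
# be built like this; the numbers are illustrative, not canonical.
from transformers import EsmConfig

config = EsmConfig(
    vocab_size=33,
    hidden_size=320,
    num_hidden_layers=6,
    num_attention_heads=20,
)
print(config.to_dict()["hidden_size"])  # 320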
| 92 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Union[str, Any] =do_rescale
lowercase : List[Any] =rescale_factor
lowercase : Tuple =do_pad
lowercase : List[str] =pad_size
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ )
lowercase : Tuple =(old_height // size + 1) * size - old_height
lowercase : Tuple =(old_width // size + 1) * size - old_width
return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase : int =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : int =do_pad if do_pad is not None else self.do_pad
lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_pad:
lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
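# A minimal sketch of the padding arithmetic above: each spatial dimension
# grows to the next multiple of `size`, and (matching the `// size + 1`
# formula) an exact multiple still gains a full extra block.
def pad_amount(old: int, size: int = 8) -> int:
    return (old // size + 1) * size - old

assert pad_amount(30, 8) == 2  # 30 -> 32
assert pad_amount(32, 8) == 8  # 32 -> 40, not 32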
| 92 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Any:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase : Optional[int] =0
# Doctest custom flag to ignore output.
UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""")
UpperCamelCase_ = doctest.OutputChecker
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_ = CustomOutputChecker
UpperCamelCase_ = HfDoctestModule
UpperCamelCase_ = HfDocTestParser
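# A hedged sketch of how the IGNORE_RESULT flag registered above is meant to be
# used: a doctest directive that makes the patched output checker accept any
# output. It only takes effect because doctest.OutputChecker is swapped for the
# custom checker above; the function below is purely illustrative.
def add(a, b):
    """
    >>> add(1, 2)  # doctest: +IGNORE_RESULT
    'whatever is printed here is accepted'
    """
    return a + b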
| 92 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase_ = logging.get_logger(__name__)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = UNetaDModel
lowerCamelCase_ = 'sample'
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =4
lowercase : Tuple =3
lowercase : List[str] =(32, 32)
lowercase : Dict =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Any =torch.tensor([10] ).to(UpperCAmelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Dict ={
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
lowercase : int =self.dummy_input
return init_dict, inputs_dict
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = UNetaDModel
lowerCamelCase_ = 'sample'
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =4
lowercase : Any =4
lowercase : Any =(32, 32)
lowercase : List[Any] =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Union[str, Any] =torch.tensor([10] ).to(UpperCAmelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (4, 32, 32)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any ={
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowercase : str =self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase , lowercase : Dict =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase , lowercase : List[Any] =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# by default, model loading will use accelerate as `low_cpu_mem_usage=True`
lowercase , lowercase : Tuple =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ )
model_accelerate.to(UpperCAmelCase__ )
model_accelerate.eval()
lowercase : int =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase : Tuple =noise.to(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase__ )
lowercase : Dict =model_accelerate(UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowercase , lowercase : Optional[Any] =UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ , low_cpu_mem_usage=UpperCAmelCase__ )
model_normal_load.to(UpperCAmelCase__ )
model_normal_load.eval()
lowercase : List[str] =model_normal_load(UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(UpperCAmelCase__ )
lowercase : Dict =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase : List[Any] =noise.to(UpperCAmelCase__ )
lowercase : Dict =torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase__ )
with torch.no_grad():
lowercase : Optional[int] =model(UpperCAmelCase__ , UpperCAmelCase__ ).sample
lowercase : List[str] =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase : Tuple =torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 ) )
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = UNetaDModel
lowerCamelCase_ = 'sample'
@property
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=(32, 32) ):
'''simple docstring'''
lowercase : Optional[int] =4
lowercase : Dict =3
lowercase : Optional[Any] =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Dict =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Dict ={
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
lowercase : Union[str, Any] =self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : Dict =UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(UpperCAmelCase__ )
lowercase : int =self.dummy_input
lowercase : Tuple =floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase__ )
lowercase : Optional[int] =noise
lowercase : List[str] =model(**UpperCAmelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : str =UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(UpperCAmelCase__ )
lowercase : Union[str, Any] =4
lowercase : Tuple =3
lowercase : int =(256, 256)
lowercase : Optional[int] =torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase__ )
with torch.no_grad():
lowercase : Dict =model(UpperCAmelCase__ , UpperCAmelCase__ ).sample
lowercase : Optional[Any] =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase : int =torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(UpperCAmelCase__ )
lowercase : List[Any] =4
lowercase : List[str] =3
lowercase : Optional[Any] =(32, 32)
lowercase : Dict =torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase__ )
with torch.no_grad():
lowercase : int =model(UpperCAmelCase__ , UpperCAmelCase__ ).sample
lowercase : List[Any] =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase : str =torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# not required for this model
pass
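# A hedged sketch of what these testers exercise end to end, using the upstream
# name (diffusers.UNet2DModel) behind the renamed class: build the tiny config
# from the first tester above and run one denoising forward pass.
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
)
noise = torch.randn(4, 3, 32, 32)
out = model(noise, timestep=torch.tensor([10])).sample
print(out.shape)  # torch.Size([4, 3, 32, 32])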
| 92 |
| 92 | 1 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
UpperCamelCase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
UpperCamelCase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
UpperCamelCase_ = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]=False ):
'''simple docstring'''
lowercase : Tuple =spearmanr(UpperCAmelCase__ , UpperCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
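# A minimal sketch of the lazy-import pattern the module above relies on:
# attribute access triggers the real import instead of paying the cost at
# import time. This is a simplified stand-in, not the actual _LazyModule.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                real = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(real, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")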
| 92 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=99 , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : List[Any]=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : Dict=8 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Any=0.0_02 , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Tuple=None , ):
'''simple docstring'''
lowercase : Dict =parent
lowercase : int =batch_size
lowercase : Union[str, Any] =encoder_seq_length
lowercase : Optional[Any] =decoder_seq_length
# For common tests
lowercase : Optional[Any] =self.decoder_seq_length
lowercase : Optional[int] =is_training
lowercase : Any =use_attention_mask
lowercase : str =use_labels
lowercase : List[str] =vocab_size
lowercase : Optional[Any] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Tuple =num_attention_heads
lowercase : Union[str, Any] =d_ff
lowercase : Union[str, Any] =relative_attention_num_buckets
lowercase : int =dropout_rate
lowercase : Any =initializer_factor
lowercase : List[str] =eos_token_id
lowercase : List[Any] =pad_token_id
lowercase : int =decoder_start_token_id
lowercase : Optional[int] =None
lowercase : Tuple =decoder_layers
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=None , ):
'''simple docstring'''
if attention_mask is None:
lowercase : Union[str, Any] =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowercase : Optional[Any] =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowercase : str =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if decoder_head_mask is None:
lowercase : Any =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if cross_attn_head_mask is None:
lowercase : List[str] =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having a pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase : Union[str, Any] =input_ids.clamp(self.pad_token_id + 1 )
lowercase : Optional[Any] =decoder_input_ids.clamp(self.pad_token_id + 1 )
lowercase : Dict =self.get_config()
lowercase : Optional[int] =config.num_attention_heads
lowercase : List[Any] =self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, input_dict
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase , lowercase : int =self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : Union[str, Any] =UMTaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =model(
input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
lowercase : Tuple =model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
lowercase : List[str] =result.last_hidden_state
lowercase : Any =result.past_key_values
lowercase : str =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , ):
'''simple docstring'''
lowercase : Optional[Any] =UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
# first forward pass
lowercase : Any =model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowercase : Any =model(UpperCAmelCase__ )
lowercase : List[str] =model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
lowercase , lowercase : Optional[int] =outputs.to_tuple()
# create a hypothetical next token and extend it to next_input_ids
lowercase : Dict =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new token to next_input_ids
lowercase : List[Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase : int =model(UpperCAmelCase__ )['''last_hidden_state''']
lowercase : Any =model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
# select random slice
lowercase : Any =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase : str =output_from_no_past[:, -1, random_slice_idx].detach()
lowercase : Union[str, Any] =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : Optional[Any] =UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
lowercase : Optional[int] =model(**UpperCAmelCase__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowerCamelCase_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase_ = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowerCamelCase_ = [0.8, 0.9]
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[int] =UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
lowercase : Dict =UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
lowercase : Union[str, Any] =config_and_inputs[0]
lowercase : Tuple =UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
model.to(UpperCAmelCase__ )
lowercase : Optional[Any] ={
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
lowercase : List[str] ={name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowercase : Tuple =torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
lowercase : Any =model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowercase : Optional[Any] =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowercase : Dict =AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
lowercase : List[Any] =[
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
lowercase : Optional[int] =tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
# fmt: off
lowercase : Tuple =torch.tensor(
[
[38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Any =model.generate(input_ids.to(UpperCAmelCase__ ) )
lowercase : Tuple =[
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
lowercase : str =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
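The one-line `_LazyModule` above defers the real submodule import until an attribute is first requested. A minimal sketch of that pattern, assuming only the standard library (the class and attribute names here are illustrative, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes by importing their defining submodule on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value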
'''simple docstring'''
from math import pi, sqrt
def _lowerCAmelCase ( __magic_name__ : float ) -> float:
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(num ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _lowerCAmelCase ( ) -> None:
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = 1.0
while num:
UpperCamelCase_ = float(input("""Gamma of: """))
print(f'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 92 |
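The recursion above evaluates the gamma function through the recurrence Γ(n) = (n − 1) · Γ(n − 1), bottoming out at Γ(1) = 1 for integers and Γ(1/2) = √π for half-integers. A quick hedged spot check, assuming the `gamma` function above is in scope:

from math import isclose, pi, sqrt

# Integer arguments reduce to factorials: gamma(n) == (n - 1)!
assert gamma(5) == 4 * 3 * 2 * 1

# Half-integers chain down to the sqrt(pi) base case:
# gamma(2.5) = 1.5 * 0.5 * sqrt(pi)
assert isclose(gamma(2.5), 1.5 * 0.5 * sqrt(pi))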
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]:
return (preds == labels).mean()
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} )
lowerCamelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _lowerCAmelCase ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
# Set seed
set_seed(training_args.seed )
try:
lowercase : Any =processors[data_args.task_name]()
lowercase : Optional[int] =processor.get_labels()
lowercase : str =len(__magic_name__ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : List[str] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowercase : int =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Any =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase : int =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase : Union[str, Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict:
lowercase : Dict =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__magic_name__ , p.label_ids )}
# Data collator
lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase : Dict =Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase : Optional[Any] ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : List[Any] =trainer.evaluate()
lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__magic_name__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__magic_name__ )
return results
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 92 | 1 |
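The `simple_accuracy` helper and the nested `compute_metrics` closure at the top of this script reduce raw logits to an accuracy score. A standalone illustration with fabricated logits (the values are made up for demonstration):

import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # fake per-choice scores
labels = np.array([1, 0, 0])

preds = np.argmax(logits, axis=1)  # same reduction compute_metrics applies
print((preds == labels).mean())    # 0.666... -- two of three predictions match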
'''simple docstring'''
from __future__ import annotations
import requests
def _lowerCAmelCase ( __magic_name__ : str ) -> dict:
lowercase : Dict =f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(__magic_name__ ).json()
def _lowerCAmelCase ( __magic_name__ : int = 10 ) -> list[dict]:
lowercase : str ='''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
lowercase : Tuple =requests.get(__magic_name__ ).json()[:max_stories]
return [get_hackernews_story(__magic_name__ ) for story_id in story_ids]
def _lowerCAmelCase ( __magic_name__ : int = 10 ) -> str:
lowercase : Any =hackernews_top_stories(__magic_name__ )
return "\n".join('''* [{title}]({url})'''.format(**__magic_name__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 92 |
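Each story is rendered with the `* [{title}]({url})` template above, i.e. one markdown bullet per story. A hedged illustration on a hand-written story dict (field names follow the Hacker News API; extra keys are simply ignored by `str.format`):

story = {"title": "Example story", "url": "https://example.com", "score": 42}
print("* [{title}]({url})".format(**story))  # * [Example story](https://example.com)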
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]:
lowercase : List[Any] =text.split(__magic_name__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )]
def _lowerCAmelCase ( __magic_name__ : dict ) -> dict:
lowercase , lowercase : int =[], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(__magic_name__ ):
titles.append(title if title is not None else '''''' )
texts.append(__magic_name__ )
return {"title": titles, "text": texts}
def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict:
lowercase : Dict =ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase : Tuple =load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ )
lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase : Optional[int] =Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
lowercase : Optional[Any] =dataset.map(
partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , batch_size=processing_args.batch_size , features=__magic_name__ , )
# And finally save your dataset
lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__magic_name__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ )
# And save the index
lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__magic_name__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowerCamelCase_ = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowerCamelCase_ = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowerCamelCase_ = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowerCamelCase_ = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 92 | 1 |
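Step 2 above wraps the dataset's `add_faiss_index` around a plain Faiss HNSW index. A minimal self-contained sketch of the same index on random vectors instead of DPR embeddings (the dimension and link count mirror the argument defaults above; the data is synthetic):

import faiss
import numpy as np

d, m = 768, 128  # embedding dimension and HNSW links per node
xb = np.random.rand(1000, d).astype("float32")  # stand-in passage embeddings
xq = np.random.rand(5, d).astype("float32")     # stand-in query embeddings

index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
index.add(xb)                       # build the HNSW graph incrementally
scores, ids = index.search(xq, 10)  # top-10 approximate neighbors per query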
'''simple docstring'''
import math
import sys
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : Tuple =''''''
try:
with open(__magic_name__ , '''rb''' ) as binary_file:
lowercase : Dict =binary_file.read()
for dat in data:
lowercase : Any =f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : int ={'''0''': '''0''', '''1''': '''1'''}
lowercase , lowercase : List[str] ='''''', ''''''
lowercase : Union[str, Any] =len(__magic_name__ )
for i in range(len(__magic_name__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase : int =lexicon[curr_string]
result += last_match_id
lowercase : List[str] =last_match_id + '''0'''
if math.loga(__magic_name__ ).is_integer():
lowercase : int ={}
for curr_key in list(__magic_name__ ):
lowercase : int =lexicon.pop(__magic_name__ )
lowercase : Any =new_lex
lowercase : Any =last_match_id + '''1'''
index += 1
lowercase : int =''''''
return result
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> None:
lowercase : Optional[Any] =8
try:
with open(__magic_name__ , '''wb''' ) as opened_file:
lowercase : Any =[
to_write[i : i + byte_length]
for i in range(0 , len(__magic_name__ ) , __magic_name__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__magic_name__ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : int =0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase : List[str] =data_bits[counter:]
lowercase : List[Any] =data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> None:
lowercase : Optional[int] =read_file_binary(__magic_name__ )
lowercase : Dict =remove_prefix(__magic_name__ )
lowercase : Optional[Any] =decompress_data(__magic_name__ )
write_file_binary(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 92 |
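`read_file_binary` above amounts to formatting every byte of the file as eight bits and concatenating the results. A hedged in-memory version of the same conversion:

data = b"AB"
bits = "".join(f"{byte:08b}" for byte in data)
print(bits)  # 0100000101000010 -- 'A' is 0x41, 'B' is 0x42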
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
UpperCamelCase_ = 128022
UpperCamelCase_ = 128028
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = MaMaaaTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : List[Any] =Path(self.tmpdirname )
save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple ='''</s>'''
lowercase : Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.get_tokenizer()
lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_tokenizer()
lowercase : str =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , )
lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , '''This is a test''' )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# fmt: off
lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = 'facebook/m2m100_418M'
lowerCamelCase_ = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase_ = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase_ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
lowercase : Optional[int] =1
return cls
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] ='''en'''
lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =tempfile.mkdtemp()
lowercase : Tuple =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] ='''en'''
lowercase : int ='''fr'''
lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
lowercase : str =shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase : int =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase : Union[str, Any] ='''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int ='''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase : Optional[Any] ='''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 92 | 1 |
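A hedged sketch of the translation flow these tests exercise, using the public `facebook/m2m100_418M` checkpoint named above; the decoder is forced to start with the target-language token, which is exactly what `_build_translation_inputs` encodes via `forced_bos_token_id`:

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))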
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : str ):
'''simple docstring'''
# test for the above condition
self.test()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =0
lowercase : List[Any] =False
while not completed:
if counter == 1:
self.reset()
lowercase : Dict =self.advance()
if not self.does_advance(UpperCAmelCase__ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowercase , lowercase , lowercase : str =self.update(UpperCAmelCase__ )
counter += 1
if counter > 10000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : int , UpperCAmelCase__ : List[int] ):
'''simple docstring'''
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowercase : Union[str, Any] =token_ids
lowercase : Tuple =len(self.token_ids )
lowercase : Tuple =-1 # the index of the currently fulfilled step
lowercase : int =False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Any =False
lowercase : Optional[int] =False
lowercase : int =False
if self.does_advance(UpperCAmelCase__ ):
self.fulfilled_idx += 1
lowercase : Any =True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase : List[str] =True
lowercase : List[Any] =completed
else:
# failed to make progress.
lowercase : Union[str, Any] =True
self.reset()
return stepped, completed, reset
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =False
lowercase : Tuple =0
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int=False ):
'''simple docstring'''
lowercase : Dict =PhrasalConstraint(self.token_ids )
if stateful:
lowercase : Union[str, Any] =self.seqlen
lowercase : Optional[Any] =self.fulfilled_idx
lowercase : List[str] =self.completed
return new_constraint
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : Dict=True ):
'''simple docstring'''
lowercase : str =max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
lowercase : Any ={}
for token_ids in nested_token_ids:
lowercase : Any =root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
lowercase : int ={}
lowercase : Union[str, Any] =level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F''' {nested_token_ids}.''' )
lowercase : Dict =root
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Any =self.trie
for current_token in current_seq:
lowercase : List[str] =start[current_token]
lowercase : Optional[Any] =list(start.keys() )
return next_tokens
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Any =list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[str] =self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : List[str] , UpperCAmelCase__ : List[List[int]] ):
'''simple docstring'''
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
lowercase : Any =DisjunctiveTrie(UpperCAmelCase__ )
lowercase : Tuple =nested_token_ids
lowercase : Dict =self.trie.max_height
lowercase : List[str] =[]
lowercase : str =False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Any =self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Union[str, Any] =self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Any =False
lowercase : Any =False
lowercase : Optional[Any] =False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
lowercase : Dict =True
else:
lowercase : Tuple =True
self.reset()
lowercase : Union[str, Any] =self.trie.reached_leaf(self.current_seq )
lowercase : int =completed
return stepped, completed, reset
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =False
lowercase : Tuple =[]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Any=False ):
'''simple docstring'''
lowercase : Dict =DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase : Union[str, Any] =self.seqlen
lowercase : int =self.current_seq
lowercase : Dict =self.completed
return new_constraint
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , UpperCAmelCase__ : List[Constraint] ):
'''simple docstring'''
lowercase : List[str] =constraints
# max # of steps required to fulfill a given constraint
lowercase : str =max([c.seqlen for c in constraints] )
lowercase : Any =len(UpperCAmelCase__ )
lowercase : Union[str, Any] =False
self.init_state()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[str] =[]
lowercase : Optional[Any] =None
lowercase : List[Any] =[constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =[]
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase : List[str] =constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
lowercase : str =self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[List[int]] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase , lowercase : Optional[Any] =self.add(UpperCAmelCase__ )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowercase , lowercase : List[Any] =False, False
if self.completed:
lowercase : Tuple =True
lowercase : Union[str, Any] =False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowercase , lowercase , lowercase : Union[str, Any] =self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
lowercase : List[str] =None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase : Optional[Any] =None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase : int =True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
lowercase , lowercase , lowercase : List[Any] =pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
lowercase : Union[str, Any] =None
if not complete and stepped:
lowercase : Any =pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase : int =(
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase : List[Any] =True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[int]=True ):
'''simple docstring'''
lowercase : int =ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
lowercase : Tuple =[
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase : Dict =self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
lowercase : int =[constraint.copy() for constraint in self.pending_constraints]
return new_state
| 92 |
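These constraint classes back constrained beam search; callers normally reach them through `generate` rather than instantiating `ConstraintListState` directly. A hedged usage sketch (the checkpoint name is illustrative; `constraints=` requires beam search, hence `num_beams > 1`):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Force the exact phrase "sehr gut" to appear somewhere in the output.
phrase_ids = tokenizer("sehr gut", add_special_tokens=False).input_ids
inputs = tokenizer("translate English to German: The food is very good.", return_tensors="pt")

out = model.generate(**inputs, constraints=[PhrasalConstraint(phrase_ids)], num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))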
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int:
try:
lowercase : Any =int(__magic_name__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =2
lowercase : Dict =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase : Union[str, Any] =i
while n % i == 0:
lowercase : Optional[int] =n // i
i += 1
return int(__magic_name__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 92 | 1 |
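The nested loops above are plain trial division: each divisor is stripped out completely before the search advances, so the last divisor stored is the largest prime factor. A hedged spot check, calling the function as `solution` the way the `__main__` block above does:

# 13195 = 5 * 7 * 13 * 29, so the largest prime factor is 29.
assert solution(13195) == 29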
'''simple docstring'''
from math import ceil
def _lowerCAmelCase ( __magic_name__ : int = 1001 ) -> int:
lowercase : List[str] =1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowercase : str =2 * i + 1
lowercase : List[Any] =2 * i
lowercase : str =total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
UpperCamelCase_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 92 |
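Each ring i of the number spiral contributes four corners whose sum simplifies to 4*(2i + 1)**2 - 6*(2i), which is exactly the per-iteration update above. A hedged spot check against the known 5×5 case, calling the function as `solution` the way the `__main__` block above does:

# 5x5 spiral diagonals: 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101
assert solution(5) == 101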
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'speech_to_text_2'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : List[str] =vocab_size
lowercase : Optional[int] =d_model
lowercase : Optional[Any] =decoder_ffn_dim
lowercase : Any =decoder_layers
lowercase : Dict =decoder_attention_heads
lowercase : List[Any] =dropout
lowercase : List[Any] =attention_dropout
lowercase : Any =activation_dropout
lowercase : Optional[Any] =activation_function
lowercase : Optional[int] =init_std
lowercase : Dict =decoder_layerdrop
lowercase : Optional[int] =use_cache
lowercase : Optional[Any] =decoder_layers
lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase : str =max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 92 | 1 |
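A hedged sketch of instantiating this decoder config directly; the public class is `Speech2Text2Config`, and the `attribute_map` above is what lets generic code read encoder-style names off a decoder-only config:

from transformers import Speech2Text2Config

config = Speech2Text2Config(
    vocab_size=10000,
    d_model=256,
    decoder_layers=6,
    decoder_attention_heads=4,
)
print(config.num_attention_heads)  # 4 -- resolved through attribute_map
print(config.hidden_size)          # 256 -- alias for d_model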
'''simple docstring'''
import unittest
import numpy as np
def _lowerCAmelCase ( __magic_name__ : np.ndarray , __magic_name__ : np.ndarray , __magic_name__ : np.ndarray , __magic_name__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase : List[str] =np.shape(__magic_name__ )
lowercase : Optional[Any] =np.shape(__magic_name__ )
lowercase : Dict =np.shape(__magic_name__ )
if shape_a[0] != shape_b[0]:
lowercase : Optional[Any] =(
'''Expected the same number of rows for A and B. '''
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__magic_name__ )
if shape_b[1] != shape_c[1]:
lowercase : Optional[Any] =(
'''Expected the same number of columns for B and C. '''
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__magic_name__ )
lowercase : List[str] =pseudo_inv
if a_inv is None:
try:
lowercase : Optional[Any] =np.linalg.inv(__magic_name__ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[Any] =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase : Dict =np.array([[0, 3], [3, 0], [2, 3]] )
lowercase : Union[str, Any] =np.array([[2, 1], [6, 3]] )
lowercase : Union[str, Any] =schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =np.block([[a, b], [b.T, c]] )
lowercase : Union[str, Any] =np.linalg.det(UpperCAmelCase__ )
lowercase : List[Any] =np.linalg.det(UpperCAmelCase__ )
lowercase : List[str] =np.linalg.det(UpperCAmelCase__ )
self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Any =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase : Optional[Any] =np.array([[0, 3], [3, 0], [2, 3]] )
lowercase : Tuple =np.array([[2, 1], [6, 3]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[Any] =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase : Optional[int] =np.array([[0, 3], [3, 0], [2, 3]] )
lowercase : Optional[Any] =np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 92 |
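In the notation of `schur_complement` above, S = C - Bᵀ A⁻¹ B, and the first unit test relies on the block-determinant identity det([[A, B], [Bᵀ, C]]) = det(A) · det(S). A hedged numeric check on a tiny hand-picked system:

import numpy as np

a = np.array([[1.0, 2.0], [2.0, 5.0]])  # must be invertible
b = np.array([[0.0], [3.0]])
c = np.array([[7.0]])

s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A in M
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))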
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : List[Any] =parent
lowercase : Tuple =batch_size
lowercase : List[str] =image_size
lowercase : List[Any] =num_channels
lowercase : Union[str, Any] =num_stages
lowercase : int =hidden_sizes
lowercase : Any =depths
lowercase : Tuple =is_training
lowercase : str =use_labels
lowercase : List[Any] =intermediate_size
lowercase : int =hidden_act
lowercase : Union[str, Any] =num_labels
lowercase : Optional[int] =initializer_range
lowercase : int =out_features
lowercase : List[str] =out_indices
lowercase : str =scope
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Dict =None
if self.use_labels:
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Dict =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
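# Note: the tester above is a plain helper rather than a unittest.TestCase; the test
# class below instantiates it in setUp() and drives it, for example:
#
#   config_and_inputs = self.model_tester.prepare_config_and_inputs()
#   self.model_tester.create_and_check_backbone(*config_and_inputs)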
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
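# Usage sketch (assumption: this module sits in a transformers checkout with pytest and
# torch installed); the classes above are discovered by the usual test runners:
#
#   python -m pytest tests/models/convnextv2/test_modeling_convnextv2.py -k "image_classification"
#
# or, programmatically:
#
#   import unittest
#   unittest.main(module=__name__, argv=["ignored"], exit=False)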
| 92 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
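# Usage sketch (assumption: an environment with `accelerate` installed; the subcommand
# handlers come from the sibling modules imported above, and the "default" subcommand's
# flags are illustrative):
#
#   parser = get_config_parser()
#   args = parser.parse_args(["default", "--mixed_precision", "fp16"])
#   args.func(args)  # writes a default accelerate config file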
| 92 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match a contiguous window of the strings in ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # slide a window of len(qs) over ks; every pattern in the window must match
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
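# For example, _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
# is True: the three patterns match a contiguous window of the flattened parameter key,
# so that rule's PartitionSpec replaces the leaf's value.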
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
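# Usage sketch (assumption: `model` is a GPT-J-style flax model whose flattened
# parameter keys match the rules above; `unfreeze` comes from flax.core.frozen_dict):
#
#   param_spec = set_partitions(unfreeze(model.params))
#   # every leaf is now either None (replicated) or a PartitionSpec such as P("mp", None)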
| 92 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)
    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
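# Usage sketch (assumption: `clip_input` is a CLIP-preprocessed pixel batch and `images`
# the matching array of decoded images, as produced inside a diffusers pipeline):
#
#   checker = IFSafetyChecker(config)
#   images, nsfw_flags, watermark_flags = checker(clip_input, images)
#   # flagged images come back zeroed out; the flag lists hold one bool per image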
| 92 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root; `cuts` then holds every vertex whose subtree size is even."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
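# Worked check: post-order subtree sizes are 1 for the leaves, 2 for vertex 3 ({3, 4}),
# 4 for vertex 6 ({6, 8, 9, 10}) and 10 for the root, so cuts == [3, 6, 1]. The root's
# entry is not a removable edge, hence len(cuts) - 1 == 2 cuts: edges (1, 3) and (1, 6).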
| 92 | 1 |