code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
def dodecahedron_surface_area(edge) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length.

    Formula: A = 3 * sqrt(25 + 10*sqrt(5)) * edge**2

    Args:
        edge: edge length of the dodecahedron; must be a positive number.

    Raises:
        ValueError: if ``edge`` is not a positive int/float.
    """
    # Check the type before comparing: comparing a non-number with 0 would
    # raise TypeError instead of the intended ValueError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Formula: V = (15 + 7*sqrt(5)) / 4 * edge**3

    Args:
        edge: edge length of the dodecahedron; must be a positive number.

    Raises:
        ValueError: if ``edge`` is not a positive int/float.
    """
    # Type check first so non-numeric input raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # (Trailing dataset-dump residue "| 237 |" removed: it was a syntax error.)
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a wav2vec2 sequence-classification model and copy the s3prl
    downstream head weights into it.

    Args:
        base_model_name: name or path of the pretrained wav2vec2 backbone.
        hf_config: the ``WavaVecaConfig`` describing the target model.
        downstream_dict: state dict of the s3prl downstream head.

    Returns:
        The populated ``WavaVecaForSequenceClassification`` model.
    """
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    # The original assigned these tensors to throwaway locals; they must be
    # written into the model's parameters for the conversion to take effect.
    # Mapping follows the upstream s3prl->HF conversion script: the s3prl
    # post_net.linear corresponds to the HF classifier head.
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a wav2vec2 audio-frame-classification (diarization) model and copy
    the s3prl downstream head weights into it.

    Args:
        base_model_name: name or path of the pretrained wav2vec2 backbone.
        hf_config: the ``WavaVecaConfig`` describing the target model.
        downstream_dict: state dict of the s3prl downstream head.

    Returns:
        The populated ``WavaVecaForAudioFrameClassification`` model.
    """
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    # Write the s3prl linear head into the HF classifier (the original version
    # dropped these tensors into unused locals).
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a wav2vec2 x-vector (speaker verification) model and copy the
    s3prl downstream head weights into it.

    Args:
        base_model_name: name or path of the pretrained wav2vec2 backbone.
        hf_config: the ``WavaVecaConfig`` describing the target model.
        downstream_dict: state dict of the s3prl downstream head.

    Returns:
        The populated ``WavaVecaForXVector`` model.
    """
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    # Module mapping per the upstream s3prl->HF conversion script:
    #   s3prl "connector"                          -> HF projector
    #   s3prl framelevel_feature_extractor TDNNs   -> HF tdnn[i]
    #   s3prl utterancelevel linear1 / linear2     -> HF feature_extractor / classifier
    #   s3prl objective.W                          -> HF objective.weight
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl wav2vec2 downstream checkpoint into a Hugging Face
    model + feature extractor saved at ``model_dump_path``.

    Args:
        base_model_name: name or path of the pretrained wav2vec2 backbone.
        config_path: path/name of the HF classifier config.
        checkpoint_path: path of the s3prl checkpoint to convert.
        model_dump_path: output directory for the converted model.

    Raises:
        NotImplementedError: if the config's architecture is not one of the
            supported heads.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    # NOTE(review): True/False here follow the upstream conversion script
    # (attention mask on, normalization off) — the obfuscated original lost
    # the literal values; confirm against the s3prl recipe if this matters.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        # Layer-weighted sum of hidden states: copy the s3prl featurizer weights.
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # The original assigned the parser and parsed args to an unused name and
    # then referenced `parser`/`args`, which were undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to instantiate a ``BeitImageProcessor``
    in the tests below (see the call site passing ``self`` as ``parent``)."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        # NOTE: the mutable list defaults are never mutated by this class, so
        # sharing them across instances is harmless here.
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load a single (image, segmentation map) pair from the ADE20k test fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    # Fixture layout: even indices are images, odd indices the matching maps.
    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])
    return image, seg_map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    # Fixture layout: even indices are images, odd indices the matching maps.
    image_a = Image.open(ds[0]["file"])
    map_a = Image.open(ds[1]["file"])
    image_b = Image.open(ds[2]["file"])
    map_b = Image.open(ds[3]["file"])
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class __lowerCamelCase ( __snake_case , unittest.TestCase ):
lowerCamelCase_ : Optional[int] = BeitImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case_ = BeitImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase , """image_std""" ) )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase )
snake_case_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
pass
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self ) -> Any:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
snake_case_ = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
snake_case_ = image_processing(lowerCamelCase , lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ = prepare_semantic_single_inputs()
snake_case_ = image_processing(lowerCamelCase , lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ = prepare_semantic_batch_inputs()
snake_case_ = image_processing(lowerCamelCase , lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def lowerCAmelCase_ ( self ) -> List[Any]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ = prepare_semantic_single_inputs()
snake_case_ = image_processing(lowerCamelCase , lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
snake_case_ = True
snake_case_ = image_processing(lowerCamelCase , lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 ) | 161 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
snake_case_ = os.path.abspath(lowercase_ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
snake_case_ = tf.train.list_variables(lowercase_ )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case_ = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case_ = name[1:]
# figure out how many levels deep the name is
snake_case_ = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowercase_ )
# read data
snake_case_ = tf.train.load_variable(lowercase_ , lowercase_ )
names.append("""/""".join(lowercase_ ) )
arrays.append(lowercase_ )
logger.info(f'''Read a total of {len(lowercase_ ):,} layers''' )
# Sanity check
if len(set(lowercase_ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(lowercase_ ) )})''' )
snake_case_ = list(set(lowercase_ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(lowercase_ , lowercase_ ):
snake_case_ = full_name.split("""/""" )
snake_case_ = model
snake_case_ = []
for i, m_name in enumerate(lowercase_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
snake_case_ = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """embeddings""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
snake_case_ = getattr(lowercase_ , """encoder""" )
snake_case_ = getattr(lowercase_ , """layer""" )
snake_case_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
snake_case_ = getattr(lowercase_ , """pooler""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
snake_case_ = getattr(lowercase_ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
snake_case_ = getattr(lowercase_ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
snake_case_ = getattr(lowercase_ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
snake_case_ = getattr(lowercase_ , """token_type_embeddings""" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
snake_case_ = getattr(lowercase_ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
snake_case_ = getattr(lowercase_ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
snake_case_ = getattr(lowercase_ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
snake_case_ = getattr(lowercase_ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
snake_case_ = getattr(lowercase_ , """intermediate""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
snake_case_ = getattr(lowercase_ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
snake_case_ = getattr(lowercase_ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
snake_case_ = getattr(lowercase_ , """weight""" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
snake_case_ = """.""".join(lowercase_ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase_ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , lowercase_ ):
snake_case_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case_ = array.transpose()
if pointer.shape == array.shape:
snake_case_ = torch.from_numpy(lowercase_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TensorFlow 2.x BERT checkpoint to a PyTorch state dict on disk.

    Args:
        tf_checkpoint_path: path to the TF 2.x checkpoint.
        config_path: path to the BERT config JSON file.
        pytorch_dump_path: output path for the PyTorch model file.
    """
    # The original had three identically-named parameters (a SyntaxError) and
    # referenced locals it never defined; restored per the call site below.
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # The original bound the parser and parsed args to an unused name and then
    # referenced `parser`/`args`, which were undefined; trailing dataset-dump
    # residue on the last line removed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE : int = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
SCREAMING_SNAKE_CASE : Optional[Any] = {"bert_for_seq_generation": 512}
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = []
_a = ["""input_ids""", """attention_mask"""]
def __init__( self, _lowercase, _lowercase="<s>", _lowercase="</s>", _lowercase="<unk>", _lowercase="<pad>", _lowercase="<::::>", _lowercase = None, **_lowercase, ) -> None:
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_lowercase, eos_token=_lowercase, unk_token=_lowercase, pad_token=_lowercase, sep_token=_lowercase, sp_model_kwargs=self.sp_model_kwargs, **_lowercase, )
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
@property
def a__ ( self ) -> str:
return self.sp_model.get_piece_size()
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
return state
def __setstate__( self, _lowercase ) -> Tuple:
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self, _lowercase ) -> List[str]:
return self.sp_model.encode(_lowercase, out_type=_lowercase )
def a__ ( self, _lowercase ) -> str:
return self.sp_model.piece_to_id(_lowercase )
def a__ ( self, _lowercase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.sp_model.IdToPiece(_lowercase )
return token
def a__ ( self, _lowercase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowercase ) + token
SCREAMING_SNAKE_CASE_ = []
else:
current_sub_tokens.append(_lowercase )
out_string += self.sp_model.decode(_lowercase )
return out_string.strip()
def a__ ( self, _lowercase, _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowercase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase, 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
| 294 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE : List[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 array of token ids with the given shape.

    Args:
        shape: output shape (iterable of ints).
        vocab_size: ids are drawn uniformly from [0, vocab_size - 1].
        rng: optional ``random.Random`` for reproducibility.

    Returns:
        A numpy array of dtype int32 with the requested shape.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    # `jnp.intaa` in the obfuscated original is not a real dtype; the intended
    # dtype is int32.
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask of the given 2-D shape.

    The last position of every row is forced to 1 so each batch entry attends
    to at least one token (the obfuscated original discarded this write).

    Args:
        shape: (batch, seq_len) shape of the mask.
        rng: optional ``random.Random`` for reproducibility.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class snake_case :
"""simple docstring"""
_a = None
_a = ()
def a__ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = inputs['input_ids'].shape[-1] // 2
SCREAMING_SNAKE_CASE_ = inputs['input_ids'][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE_ = jnp.ones_like(_lowercase )
SCREAMING_SNAKE_CASE_ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE_ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE_ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__ ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_ = getattr(_lowercase, _lowercase )
SCREAMING_SNAKE_CASE_ = pt_model_class(_lowercase ).eval()
SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_lowercase, flax_model.params )
SCREAMING_SNAKE_CASE_ = flax_model.generate(_lowercase ).sequences
SCREAMING_SNAKE_CASE_ = pt_model.generate(torch.tensor(_lowercase, dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences )
def a__ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 0.8
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 0.3
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
SCREAMING_SNAKE_CASE_ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
SCREAMING_SNAKE_CASE_ = 'Hello world'
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase, return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowercase, 'do_samples' ):
model.generate(_lowercase, do_samples=_lowercase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowercase, 'foo' ):
SCREAMING_SNAKE_CASE_ = {'foo': 'bar'}
model.generate(_lowercase, **_lowercase )
| 294 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a : List[str] = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : str = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[str] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 522 | from math import factorial
__a : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(lowercase ) )
def UpperCAmelCase ( lowercase = 60 , lowercase = 1000000 ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ) or not isinstance(lowercase , lowercase ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
__lowercase = 0
# the cached sizes of the previous chains
__lowercase = {}
for start_chain_element in range(1 , lowercase ):
# The temporary set will contain the elements of the chain
__lowercase = set()
__lowercase = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
__lowercase = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(lowercase )
chain_set_length += 1
__lowercase = digit_factorial_sum(lowercase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
__lowercase = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''') | 522 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case__ ( ) -> Tuple:
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
UpperCAmelCase_ = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
return image
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ = dct.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ = torch.cat((q_bias, torch.zeros_like(__SCREAMING_SNAKE_CASE , requires_grad=__SCREAMING_SNAKE_CASE ), v_bias) )
UpperCAmelCase_ = qkv_bias
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ = 364 if "coco" in model_name else 224
UpperCAmelCase_ = BlipaVisionConfig(image_size=__SCREAMING_SNAKE_CASE ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
UpperCAmelCase_ = BlipaConfig(vision_config=__SCREAMING_SNAKE_CASE , text_config=__SCREAMING_SNAKE_CASE )
return config, image_size
@torch.no_grad()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ) -> Any:
UpperCAmelCase_ = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
UpperCAmelCase_ = tokenizer("\n" , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ = get_blipa_config(__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = BlipaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
UpperCAmelCase_ , UpperCAmelCase_ = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_model_and_preprocess(
name=__SCREAMING_SNAKE_CASE , model_type=__SCREAMING_SNAKE_CASE , is_eval=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
original_model.eval()
print("Done!" )
# update state dict keys
UpperCAmelCase_ = original_model.state_dict()
UpperCAmelCase_ = create_rename_keys(__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ = state_dict.pop(__SCREAMING_SNAKE_CASE )
if key.startswith("Qformer.bert" ):
UpperCAmelCase_ = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
UpperCAmelCase_ = key.replace("self" , "attention" )
if "opt_proj" in key:
UpperCAmelCase_ = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
UpperCAmelCase_ = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
UpperCAmelCase_ = key.replace("opt" , "language" )
if key.startswith("t5" ):
UpperCAmelCase_ = key.replace("t5" , "language" )
UpperCAmelCase_ = val
# read in qv biases
read_in_q_v_bias(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = hf_model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ = load_demo_image()
UpperCAmelCase_ = vis_processors["eval"](__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(__SCREAMING_SNAKE_CASE )
# create processor
UpperCAmelCase_ = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=__SCREAMING_SNAKE_CASE , image_std=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = BlipaProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# make sure processor creates exact same pixel values
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
hf_model.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
UpperCAmelCase_ = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).logits
else:
UpperCAmelCase_ = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
UpperCAmelCase_ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCAmelCase_ = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=__SCREAMING_SNAKE_CASE )
assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=__SCREAMING_SNAKE_CASE )
else:
# cast to same type
UpperCAmelCase_ = logits.dtype
assert torch.allclose(original_logits.to(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , atol=1E-2 )
print("Looks ok!" )
print("Generating a caption..." )
UpperCAmelCase_ = ""
UpperCAmelCase_ = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = original_model.generate({"image": original_pixel_values} )
UpperCAmelCase_ = hf_model.generate(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = input_ids.shape[1]
UpperCAmelCase_ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [text.strip() for text in output_text]
print("HF generation:" , __SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 579 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
SCREAMING_SNAKE_CASE = True
except ImportError:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@staticmethod
def A__ ( lowerCAmelCase ):
UpperCAmelCase_ = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=lowerCAmelCase , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=lowerCAmelCase , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=lowerCAmelCase )
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , *lowerCAmelCase ):
UpperCAmelCase_ = testing
UpperCAmelCase_ = testing_file
UpperCAmelCase_ = path
def A__ ( self ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(lowerCAmelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
UpperCAmelCase_ = (
Path(lowerCAmelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCAmelCase ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(lowerCAmelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCAmelCase , extra_context=lowerCAmelCase , )
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(lowerCAmelCase )
UpperCAmelCase_ = configuration["lowercase_modelname"]
UpperCAmelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'''{directory}/configuration.json''' )
UpperCAmelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowerCAmelCase )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(lowerCAmelCase ):
with open(lowerCAmelCase , "r" ) as f:
UpperCAmelCase_ = f.readlines()
with open(lowerCAmelCase , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCAmelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# Create temp file
UpperCAmelCase_ , UpperCAmelCase_ = mkstemp()
UpperCAmelCase_ = False
with fdopen(lowerCAmelCase , "w" ) as new_file:
with open(lowerCAmelCase ) as old_file:
for line in old_file:
new_file.write(lowerCAmelCase )
if line_to_copy_below in line:
UpperCAmelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCAmelCase )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowerCAmelCase , lowerCAmelCase )
# Remove original file
remove(lowerCAmelCase )
# Move new file
move(lowerCAmelCase , lowerCAmelCase )
def skip_units(lowerCAmelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCAmelCase ):
with open(lowerCAmelCase ) as datafile:
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(lowerCAmelCase )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(lowerCAmelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase_ = []
elif "##" not in line:
lines_to_copy.append(lowerCAmelCase )
remove(lowerCAmelCase )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowerCAmelCase )
| 579 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
# Force deterministic torch/cuda kernels at import time so the expected-slice
# assertions in the regression tests below are reproducible across runs.
enable_full_determinism()
class _a ( lowerCamelCase_ , unittest.TestCase ):

    """Unit tests for the diffusers ``PriorTransformer`` model.

    NOTE(review): this file has been machine-rewritten — every method below is
    named ``__lowerCAmelCase`` (so each later ``def`` shadows the previous one
    and only the last definition is reachable at runtime), the class attribute
    ``__SCREAMING_SNAKE_CASE`` is assigned twice, and several locals are bound
    as ``_lowercase`` while later lines read the original names
    (``batch_size``, ``hidden_states``, ...).  Confirm against the upstream
    test file before relying on these methods.
    """

    # NOTE(review): both assignments target the same name; the second shadows
    # the first (originally ``model_class`` and ``main_input_name``).
    __SCREAMING_SNAKE_CASE = PriorTransformer
    __SCREAMING_SNAKE_CASE = 'hidden_states'

    @property
    def __lowerCAmelCase ( self ):
        # Random forward-pass inputs: batch of 4, embedding dim 8, 7 encoder embeddings.
        _lowercase =4
        _lowercase =8
        _lowercase =7
        _lowercase =floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
        _lowercase =floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
        _lowercase =floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def __lowerCAmelCase ( self , lowerCAmelCase_=0 ):
        # Seeded variant of the dummy inputs, used for reproducible output comparison.
        torch.manual_seed(lowerCAmelCase_ )
        _lowercase =4
        _lowercase =8
        _lowercase =7
        _lowercase =torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
        _lowercase =torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
        _lowercase =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def __lowerCAmelCase ( self ):
        # Expected input shape consumed by the common model tests.
        return (4, 8)

    @property
    def __lowerCAmelCase ( self ):
        # Expected output shape consumed by the common model tests.
        return (4, 8)

    def __lowerCAmelCase ( self ):
        # Small config kwargs + matching dummy inputs for the shared ModelTesterMixin tests.
        _lowercase ={
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        _lowercase =self.dummy_input
        return init_dict, inputs_dict

    def __lowerCAmelCase ( self ):
        # from_pretrained must load the dummy checkpoint with no missing keys
        # and produce a non-None forward output.
        _lowercase , _lowercase =PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy" , output_loading_info=lowerCAmelCase_ )
        self.assertIsNotNone(lowerCAmelCase_ )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowerCAmelCase_ )
        _lowercase =model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"

    def __lowerCAmelCase ( self ):
        # forward() must accept (hidden_states, timestep) as its first two arguments.
        _lowercase , _lowercase =self.prepare_init_args_and_inputs_for_common()
        _lowercase =self.model_class(**lowerCAmelCase_ )
        _lowercase =inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        _lowercase =[*signature.parameters.keys()]
        _lowercase =["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2] , lowerCAmelCase_ )

    def __lowerCAmelCase ( self ):
        # Regression test: the pretrained dummy model's output slice must match
        # a stored reference value within rtol=1e-2.
        _lowercase =PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
        _lowercase =model.to(lowerCAmelCase_ )
        if hasattr(lowerCAmelCase_ , "set_default_attn_processor" ):
            model.set_default_attn_processor()
        _lowercase =self.get_dummy_seed_input()
        with torch.no_grad():
            _lowercase =model(**lowerCAmelCase_ )[0]
        _lowercase =output[0, :5].flatten().cpu()
        # (removed a leftover debug print of the output slice here)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        _lowercase =torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
        self.assertTrue(torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-2 ) )
@slow
class _a ( unittest.TestCase ):
    """Slow integration tests against the kandinsky-2-1 prior checkpoint.

    NOTE(review): machine-mangled — the seeded-input helper declares
    ``lowerCAmelCase_`` four times in one parameter list (a SyntaxError),
    and locals are assigned to ``_lowercase`` but read under their
    original names.  Kept byte-identical pending reconstruction.
    """

    def __lowerCAmelCase ( self , lowerCAmelCase_=1 , lowerCAmelCase_=768 , lowerCAmelCase_=77 , lowerCAmelCase_=0 ):
        # Reproducible random inputs for the full-size prior model.
        torch.manual_seed(lowerCAmelCase_ )
        _lowercase =batch_size
        _lowercase =embedding_dim
        _lowercase =num_embeddings
        _lowercase =torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
        _lowercase =torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
        _lowercase =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def __lowerCAmelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
            [37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
            # fmt: on
        ] )
    def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
        # For each seed, the first 8 output values must match the stored slice.
        _lowercase =PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
        model.to(lowerCAmelCase_ )
        _lowercase =self.get_dummy_seed_input(seed=lowerCAmelCase_ )
        with torch.no_grad():
            _lowercase =model(**lowerCAmelCase_ )[0]
        assert list(sample.shape ) == [1, 768]
        _lowercase =sample[0, :8].flatten().cpu()
        print(lowerCAmelCase_ )
        _lowercase =torch.tensor(lowerCAmelCase_ )
        assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
| 594 | from math import factorial
def __lowerCamelCase ( __a : int , __a : int , __a : float ) -> float:
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(__a , __a ) or not isinstance(__a , __a ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
_lowercase =(prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_lowercase =float(factorial(__a ) )
coefficient /= factorial(__a ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Demo of the pmf for 2 successes in 4 trials at p = 0.75.
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    # Bug fix: the function defined in this module is ``__lowerCamelCase``;
    # the original called the undefined name ``binomial_distribution``.
    print(__lowerCamelCase(2, 4, 0.7_5))
| 594 | 1 |
__a = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__a = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__a = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 377 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
    """Builds tiny ResNet configs and inputs for the TF test-suite below.

    NOTE(review): machine-mangled — ``__init__`` declares every parameter
    as ``__lowerCamelCase`` (a SyntaxError) and attributes are assigned to
    the bare name ``UpperCAmelCase`` instead of ``self.<attr>``.  Kept
    byte-identical pending reconstruction.
    """

    def __init__( self : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Dict=1_0 , __lowerCamelCase : Optional[int]=[1_0, 2_0, 3_0, 4_0] , __lowerCamelCase : Optional[int]=[1, 1, 2, 1] , __lowerCamelCase : int=True , __lowerCamelCase : str=True , __lowerCamelCase : str="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : int=None , ) -> Dict:
        """Record the test hyper-parameters (batch size, depths, etc.)."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = embeddings_size
        UpperCAmelCase = hidden_sizes
        UpperCAmelCase = depths
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_act
        UpperCAmelCase = num_labels
        UpperCAmelCase = scope
        UpperCAmelCase = len(__lowerCamelCase )

    def _lowercase ( self : List[str] ) -> Optional[Any]:
        """Create a random pixel batch, optional labels, and a config."""
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase = self.get_config()
        return config, pixel_values, labels

    def _lowercase ( self : List[Any] ) -> Dict:
        """Build a ResNetConfig from the recorded hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def _lowercase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> str:
        """Check the base model's output shape."""
        UpperCAmelCase = TFResNetModel(config=__lowerCamelCase )
        UpperCAmelCase = model(__lowerCamelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def _lowercase ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> Tuple:
        """Check the classification head's logits shape."""
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = TFResNetForImageClassification(__lowerCamelCase )
        UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Optional[int] ) -> Tuple:
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
    """Common + model-specific tests for the TF ResNet models.

    NOTE(review): machine-mangled — locals are bound to ``UpperCAmelCase``
    and read under their original names; setUp references
    ``TFResNetModelTester``, which is not defined under that name in this
    file.  Kept byte-identical pending reconstruction.
    """

    # Model classes exercised by the shared test machinery (TF only).
    UpperCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    UpperCamelCase = (
        {'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # ResNet has no text inputs / embeddings, so disable those common tests.
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False

    def _lowercase ( self : str ) -> List[Any]:
        """Instantiate the model tester and the config tester."""
        UpperCAmelCase = TFResNetModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )

    def _lowercase ( self : str ) -> Dict:
        """Run the full battery of config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowercase ( self : str ) -> Dict:
        """No extra common config properties to check for ResNet."""
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def _lowercase ( self : Optional[int] ) -> Optional[int]:
        """Skipped: vision model, no inputs_embeds."""
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        """Skipped: vision model, no token embeddings."""
        pass

    def _lowercase ( self : Optional[Any] ) -> Optional[int]:
        """call() must take ``pixel_values`` as its first argument."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(__lowerCamelCase )
            UpperCAmelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __lowerCamelCase )

    def _lowercase ( self : str ) -> List[Any]:
        """Forward-pass shape check for the base model."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Hidden-state outputs: one per stage plus the stem, right shapes."""
        def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ):
            UpperCAmelCase = model_class(__lowerCamelCase )
            UpperCAmelCase = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
            UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase = self.model_tester.num_stages
            self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase = layer_type
                UpperCAmelCase = True
                check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase = True
                check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def _lowercase ( self : Union[str, Any] ) -> int:
        """Forward-pass check for the image-classification head."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )

    @slow
    def _lowercase ( self : Tuple ) -> List[str]:
        """Loading the first hub checkpoint must succeed."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = TFResNetModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> "Image.Image":
    """Load the standard COCO two-cats fixture image used by the image tests.

    Bug fix: the original bound the opened image to a throwaway name and
    then returned the undefined name ``image``.  (The original annotated
    the return as ``int``; it actually returns a PIL image.)
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
    """Slow integration test: TF ResNet logits on the COCO cats image.

    NOTE(review): machine-mangled — locals assigned to ``UpperCAmelCase``
    and read under their original names.  Kept byte-identical.
    """

    @cached_property
    def _lowercase ( self : List[Any] ) -> Dict:
        """Image processor for the first hub checkpoint (None without vision)."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def _lowercase ( self : Dict ) -> str:
        """End-to-end check: logits shape (1, 1000) and a known slice."""
        UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        UpperCAmelCase = self.default_image_processor
        UpperCAmelCase = prepare_img()
        UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""tf""" )
        # forward pass
        UpperCAmelCase = model(**__lowerCamelCase )
        # verify the logits
        UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __lowerCamelCase )
        UpperCAmelCase = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
| 377 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity of two collections, or None.

    Reconstructed fix: the original declared all three parameters as
    ``UpperCAmelCase_`` (a SyntaxError), read locals under names it never
    bound, and crashed on ``tuple + list`` concatenation in the ordered
    branch.

    Args:
        set_a, set_b: two ``set`` objects, or two ordered ``list``/``tuple``
            collections (duplicates in ``set_a`` are kept, matching the
            original ordered-collection semantics).
        alternative_union: when true, use ``len(A) + len(B)`` as the
            denominator instead of the size of the true union.

    Returns:
        The similarity ratio as a float, or None for unsupported input types.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        # Build the union as a list so duplicates in set_a are preserved;
        # list(set_a) also makes this work when set_a is a tuple.
        union = list(set_a) + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    # Demo of the set-based similarity.
    # Bug fix: the original rebound the single name ``_lowercase`` twice and
    # called the undefined name ``jaccard_similarity``; use distinct names
    # and the function actually defined in this module.
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(SCREAMING_SNAKE_CASE_(set_a, set_b))
| 720 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase_ ( A ):
    """Unconditional image-generation pipeline using Karras-style stochastic
    sampling (a UNet2D model plus a KarrasVeScheduler).

    NOTE(review): machine-mangled — ``__init__`` and ``__call__`` declare
    the parameter ``__A`` multiple times (a SyntaxError) and locals are
    bound to ``SCREAMING_SNAKE_CASE_`` but read under their original names
    (``img_size``, ``model``, ``sample`` ...).  Kept byte-identical
    pending reconstruction against the upstream diffusers pipeline.
    """

    # Expected components (type-annotated as 42 by the mangler).
    __lowerCamelCase = 42
    __lowerCamelCase = 42

    def __init__( self , __A , __A ) -> List[Any]:
        # Register the UNet and the Karras scheduler on the pipeline.
        super().__init__()
        self.register_modules(unet=__A , scheduler=__A )

    @torch.no_grad()
    def __call__( self , __A = 1 , __A = 50 , __A = None , __A = "pil" , __A = True , **__A , ) -> Union[Tuple, ImagePipelineOutput]:
        # Sampling loop following Karras et al. (2022), Algorithm 2.
        SCREAMING_SNAKE_CASE_ : Optional[Any] =self.unet.config.sample_size
        SCREAMING_SNAKE_CASE_ : Tuple =(batch_size, 3, img_size, img_size)
        SCREAMING_SNAKE_CASE_ : Tuple =self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        SCREAMING_SNAKE_CASE_ : Any =randn_tensor(__A , generator=__A , device=self.device ) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(__A )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            SCREAMING_SNAKE_CASE_ : Optional[int] =self.scheduler.schedule[t]
            SCREAMING_SNAKE_CASE_ : List[str] =self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =self.scheduler.add_noise_to_input(__A , __A , generator=__A )

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            SCREAMING_SNAKE_CASE_ : Dict =(sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            SCREAMING_SNAKE_CASE_ : int =self.scheduler.step(__A , __A , __A , __A )

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                SCREAMING_SNAKE_CASE_ : Tuple =(sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.scheduler.step_correct(
                    __A , __A , __A , __A , step_output.prev_sample , step_output['''derivative'''] , )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] =step_output.prev_sample

        # Rescale from [-1, 1] to [0, 1] and move to channels-last numpy.
        SCREAMING_SNAKE_CASE_ : Tuple =(sample / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ : Optional[int] =self.numpy_to_pil(__A )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__A )
| 431 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE (a_ , unittest.TestCase ):
    """TokenizerTesterMixin suite for the PhoBERT (Vietnamese BPE) tokenizer.

    NOTE(review): machine-mangled — locals are assigned to ``_a`` and read
    under ``_lowercase``/original names.  Kept byte-identical pending
    reconstruction against the upstream test file.
    """

    __a =PhobertTokenizer
    __a =False

    def UpperCamelCase__ ( self : Any ):
        """Write a tiny BPE vocab + merges file into the temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _a = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        _a = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        _a = ['''#version: 0.2''', '''l à</w>''']
        _a = {'''unk_token''': '''<unk>'''}

        _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )

        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n' )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_lowercase ) )

    def UpperCamelCase__ ( self : str , **__a : List[Any] ):
        """Load a PhobertTokenizer from the temp dir with special tokens."""
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **_lowercase )

    def UpperCamelCase__ ( self : int , __a : Union[str, Any] ):
        """Provide an (input, expected-output) text pair for common tests."""
        _a = '''Tôi là VinAI Research'''
        _a = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text

    def UpperCamelCase__ ( self : Any ):
        """Tokenization + id conversion on the tiny handcrafted vocab."""
        _a = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _a = '''Tôi là VinAI Research'''
        _a = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        _a = tokenizer.tokenize(_lowercase )
        print(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

        _a = tokens + [tokenizer.unk_token]

        _a = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
| 692 | """simple docstring"""
import numpy
# List of input, output pairs
# Linear-hypothesis gradient-descent demo.
#
# NOTE(review): machine-mangled throughout — the five module constants are
# all bound to ``SCREAMING_SNAKE_CASE__`` but read as ``train_data``,
# ``test_data``, ``parameter_vector``, ``m`` and ``LEARNING_RATE``; every
# function is named ``UpperCamelCase`` (each def shadows the previous one);
# several defs repeat the parameter name ``SCREAMING_SNAKE_CASE_``, which
# is a SyntaxError; and the __main__ guard calls function names that no
# longer exist.  Kept byte-identical pending reconstruction.

# List of input, output pairs
SCREAMING_SNAKE_CASE__ : Optional[Any] =(
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
SCREAMING_SNAKE_CASE__ : str =(((515, 22, 13), 555), ((61, 35, 49), 150))
SCREAMING_SNAKE_CASE__ : int =[2, 4, 1, 5]
SCREAMING_SNAKE_CASE__ : Any =len(train_data)
SCREAMING_SNAKE_CASE__ : List[Any] =0.009


def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="train" ) ->List[str]:
    # _error: hypothesis(example) - actual output for one example.
    return calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - output(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )


def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Tuple:
    # _hypothesis_value: theta_0 + sum_i theta_{i+1} * x_i.
    _lowerCamelCase : int = 0
    for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
    # output: look up the target value of one example in train/test data.
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
    # calculate_hypothesis_value: hypothesis for one example's features.
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=m ) ->List[str]:
    # summation_of_cost_derivative: sum of errors (optionally weighted by
    # feature ``index``; index == -1 handles the bias term).
    _lowerCamelCase : Tuple = 0
    for i in range(SCREAMING_SNAKE_CASE_ ):
        if index == -1:
            summation_value += _error(SCREAMING_SNAKE_CASE_ )
        else:
            summation_value += _error(SCREAMING_SNAKE_CASE_ ) * train_data[i][0][index]
    return summation_value


def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
    # get_cost_derivative: mean of the summed error terms.
    _lowerCamelCase : Optional[Any] = summation_of_cost_derivative(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / m
    return cost_derivative_value


def UpperCamelCase ( ) ->Optional[Any]:
    # run_gradient_descent: iterate parameter updates until convergence
    # (numpy.allclose within the absolute/relative tolerances below).
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    _lowerCamelCase : Dict = 0.000002
    _lowerCamelCase : List[str] = 0
    _lowerCamelCase : Union[str, Any] = 0
    while True:
        j += 1
        _lowerCamelCase : str = [0, 0, 0, 0]
        for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
            _lowerCamelCase : Optional[int] = get_cost_derivative(i - 1 )
            _lowerCamelCase : Optional[Any] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ , rtol=SCREAMING_SNAKE_CASE_ , ):
            break
        _lowerCamelCase : List[str] = temp_parameter_vector
    print(('''Number of iterations:''', j) )


def UpperCamelCase ( ) ->Optional[Any]:
    # test_gradient_descent: print actual vs predicted on the test set.
    for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
        print(('''Actual output value:''', output(SCREAMING_SNAKE_CASE_ , '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , '''test''' )) )


if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
| 434 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)

# Map from pretrained checkpoint name to its hosted config.json URL.
lowerCAmelCase : str = {
    """SCUT-DLVCLab/lilt-roberta-en-base""": (
        """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
    ),
}
class __magic_name__ ( PretrainedConfig ):
    """Configuration class for the LiLT (Language-Independent Layout
    Transformer) model.

    Reconstructed fix: the original declared every ``__init__`` parameter
    as ``_a`` (a SyntaxError), assigned every attribute to the throwaway
    name ``lowerCamelCase``, and inherited from the undefined name
    ``UpperCAmelCase__`` (the ``PretrainedConfig`` imported above).
    Parameter names and defaults follow the upstream ``LiltConfig``.

    Stores the hyper-parameters below and forwards ``pad_token_id`` plus
    any extra keyword arguments to :class:`PretrainedConfig`.
    """

    __UpperCamelCase = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        """Record the model hyper-parameters on the config instance."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Layout stream width = hidden_size // channel_shrink_ratio.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 703 |
"""simple docstring"""
from collections.abc import Sequence
def a__(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum subarray sum of *arr* (Kadane's algorithm).

    Reconstructed fix: the original declared both parameters as
    ``snake_case__`` (a SyntaxError) and bound locals to ``lowerCamelCase``
    while reading ``max_sum``/``curr_sum``.

    Args:
        arr: the input sequence of numbers; an empty sequence yields 0.
        allow_empty_subarrays: when true, the empty subarray (sum 0) is a
            valid candidate, so the result is never negative.
    """
    if not arr:
        return 0
    # With empty subarrays allowed, start from 0; otherwise from -inf so
    # that an all-negative input still returns its largest element.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at the current element
        # (restart at 0, i.e. the empty subarray, when that is permitted).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Bug fix: bind the demo list to ``nums`` (the name the f-string reads)
    # and call ``a__`` — ``max_subarray_sum`` is not defined in this module.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{a__(nums) = }")
| 533 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase_ ( __magic_name__ ):
    """CLIP tokenizer wrapper that expands a placeholder token into several
    learned sub-tokens (textual-inversion style) before tokenizing.

    NOTE(review): machine-mangled — every method repeats the name
    ``_lowerCAmelCase`` across positional/star parameters (a SyntaxError)
    and locals are read under their original names (``output``, ``tokens``,
    ``text`` ...).  ``self.token_map`` maps placeholder -> list of vector
    tokens.  Kept byte-identical pending reconstruction.
    """

    def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
        # Initialise the underlying CLIPTokenizer and an empty token map.
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
        _lowerCAmelCase = {}

    def _snake_case ( self , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
        # try_adding_tokens: add one token; fail loudly if it already exists.
        _lowerCAmelCase = super().add_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                " `placeholder_token` that is not already in the tokenizer." )

    def _snake_case ( self , _lowerCAmelCase , *_lowerCAmelCase , _lowerCAmelCase=1 , **_lowerCAmelCase ) -> Union[str, Any]:
        # add_placeholder_tokens: register ``num_vec_per_token`` sub-tokens
        # (``tok``, or ``tok_0``..``tok_{n-1}``) for one placeholder.
        _lowerCAmelCase = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
            output.append(_lowerCAmelCase )
        else:
            _lowerCAmelCase = []
            for i in range(_lowerCAmelCase ):
                _lowerCAmelCase = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
                output.append(_lowerCAmelCase )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token}keep placeholder tokens independent''' )
        _lowerCAmelCase = output

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1.0 ) -> int:
        # replace_placeholder_tokens_in_text: expand each registered
        # placeholder into its (optionally shuffled/truncated) sub-tokens.
        # Recurses element-wise when given a list of texts.
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            _lowerCAmelCase = []
            for i in range(len(_lowerCAmelCase ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCAmelCase ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                _lowerCAmelCase = self.token_map[placeholder_token]
                _lowerCAmelCase = tokens[: 1 + int(len(_lowerCAmelCase ) * prop_tokens_to_load )]
                if vector_shuffle:
                    _lowerCAmelCase = copy.copy(_lowerCAmelCase )
                    random.shuffle(_lowerCAmelCase )
                _lowerCAmelCase = text.replace(_lowerCAmelCase , " ".join(_lowerCAmelCase ) )
        return text

    def __call__( self , _lowerCAmelCase , *_lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1.0 , **_lowerCAmelCase ) -> Optional[int]:
        # Tokenize after expanding placeholders in the input text.
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                _lowerCAmelCase , vector_shuffle=_lowerCAmelCase , prop_tokens_to_load=_lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase , )

    def _snake_case ( self , _lowerCAmelCase , *_lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1.0 , **_lowerCAmelCase ) -> Tuple:
        # encode() counterpart of __call__, with the same expansion step.
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                _lowerCAmelCase , vector_shuffle=_lowerCAmelCase , prop_tokens_to_load=_lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase , )
| 18 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for ``video-classification``.

    NOTE(review): machine-mangled — two methods declare ``lowerCamelCase``
    more than once in a parameter list (a SyntaxError) and locals are
    bound to ``UpperCamelCase`` but read under their original names.
    Kept byte-identical pending reconstruction.
    """

    __SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
        # get_test_pipeline: build a top-2 pipeline plus two example videos
        # (a downloaded local file and a hub URL).
        UpperCamelCase : Union[str, Any] = hf_hub_download(
            repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
        UpperCamelCase : List[Any] = VideoClassificationPipeline(model=lowerCamelCase , image_processor=lowerCamelCase , top_k=2 )
        UpperCamelCase : List[str] = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> List[Any]:
        # run_pipeline_test: each example must yield two {score, label} dicts.
        for example in examples:
            UpperCamelCase : int = video_classifier(lowerCamelCase )
            self.assertEqual(
                lowerCamelCase , [
                    {"score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase )},
                    {"score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase )},
                ] , )

    @require_torch
    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        # Small-model PT test: fixed scores from a tiny random checkpoint,
        # for single and batched inputs.
        UpperCamelCase : List[Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        UpperCamelCase : Any = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
        UpperCamelCase : List[str] = pipeline(
            "video-classification" , model=lowerCamelCase , feature_extractor=lowerCamelCase , frame_sampling_rate=4 )
        UpperCamelCase : int = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
        UpperCamelCase : List[str] = video_classifier(lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )

        UpperCamelCase : List[Any] = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ] , )

    @require_tf
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        # Small-model TF test: not implemented for this pipeline.
        pass
| 173 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _A :
def __init__( self : Dict , _A : Optional[int] , _A : Tuple=13 , _A : List[str]=30 , _A : Union[str, Any]=2 , _A : Optional[int]=3 , _A : Dict=True , _A : Optional[int]=True , _A : Tuple=32 , _A : List[Any]=5 , _A : Any=4 , _A : Tuple=37 , _A : List[str]="gelu" , _A : Dict=0.1 , _A : str=0.1 , _A : Optional[Any]=10 , _A : Union[str, Any]=0.02 , _A : Tuple=3 , _A : str=None , _A : Any=2 , ) -> Optional[int]:
"""simple docstring"""
lowercase : Any = parent
lowercase : Optional[Any] = batch_size
lowercase : List[str] = image_size
lowercase : int = patch_size
lowercase : Union[str, Any] = num_channels
lowercase : Tuple = is_training
lowercase : str = use_labels
lowercase : List[str] = hidden_size
lowercase : Union[str, Any] = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Any = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : Tuple = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : str = type_sequence_label_size
lowercase : List[Any] = initializer_range
lowercase : List[Any] = scope
lowercase : Any = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase : int = (image_size // patch_size) ** 2
lowercase : Optional[Any] = num_patches + 2
def __a ( self : str ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : List[Any] = None
if self.use_labels:
lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __a ( self : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : int ) -> Optional[int]:
"""simple docstring"""
lowercase : Any = DeiTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Tuple , _A : Dict , _A : List[Any] , _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : str = DeiTForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase : str = 1
lowercase : List[Any] = DeiTForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __a ( self : Any , _A : List[Any] , _A : Tuple , _A : str ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[Any] = self.type_sequence_label_size
lowercase : Union[str, Any] = DeiTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase : int = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase : List[Any] = 1
lowercase : Tuple = DeiTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : Optional[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = self.prepare_config_and_inputs()
(
lowercase
) : List[str] = config_and_inputs
lowercase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : int = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[Any] = False
def __a ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : Dict = DeiTModelTester(self )
lowercase : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def __a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
pass
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __a ( self : Tuple ) -> str:
"""simple docstring"""
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Tuple = model_class(lowerCamelCase_ )
lowercase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[Any] = [*signature.parameters.keys()]
lowercase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __a ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __a ( self : int , _A : List[str] , _A : Optional[Any] , _A : Optional[int]=False ) -> int:
"""simple docstring"""
lowercase : Optional[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __a ( self : List[Any] ) -> Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : str = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowercase : Tuple = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowercase : Any = model(**lowerCamelCase_ ).loss
loss.backward()
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase : Union[str, Any] = False
lowercase : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase : Dict = model_class(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase_ )
model.train()
lowercase : Optional[int] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowercase : Tuple = model(**lowerCamelCase_ ).loss
loss.backward()
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Tuple = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
lowercase : str = problem_type['''title''']
lowercase : Dict = problem_type['''num_labels''']
lowercase : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowercase : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if problem_type["num_labels"] > 1:
lowercase : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
lowercase : Union[str, Any] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase_ ) as warning_list:
lowercase : List[str] = model(**lowerCamelCase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] = DeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __a ( self : int ) -> List[str]:
"""simple docstring"""
lowercase : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
lowerCamelCase_ )
lowercase : Optional[Any] = self.default_image_processor
lowercase : List[str] = prepare_img()
lowercase : str = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowercase : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowercase : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowercase : Tuple = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
lowercase : Optional[int] = self.default_image_processor
lowercase : List[str] = prepare_img()
lowercase : List[Any] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' )
lowercase : Tuple = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase : List[Any] = model(lowerCamelCase_ ) | 718 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A :
_UpperCamelCase : Dict = None
@experimental
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
return _map_with_joblib(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Tuple = num_proc if num_proc <= len(__magic_name__ ) else len(__magic_name__ )
lowercase : Tuple = [] # We organize the splits ourselve (contiguous splits)
for index in range(__magic_name__ ):
lowercase : Optional[int] = len(__magic_name__ ) // num_proc
lowercase : List[str] = len(__magic_name__ ) % num_proc
lowercase : Union[str, Any] = div * index + min(__magic_name__ , __magic_name__ )
lowercase : List[str] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__magic_name__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(__magic_name__ )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(__magic_name__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
lowercase , lowercase : Optional[int] = None, None
if not disable_tqdm:
lowercase , lowercase : Any = (RLock(),), tqdm.set_lock
with Pool(__magic_name__ , initargs=__magic_name__ , initializer=__magic_name__ ) as pool:
lowercase : Tuple = pool.map(__magic_name__ , __magic_name__ )
logger.info(F"""Finished {num_proc} processes""" )
lowercase : Union[str, Any] = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(__magic_name__ )} objects""" )
return mapped
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__magic_name__ ):
return joblib.Parallel()(
joblib.delayed(__magic_name__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def snake_case( __magic_name__ ) -> List[Any]:
'''simple docstring'''
lowercase : int = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowercase : List[Any] = None | 596 | 0 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0."
SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}."
SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2}
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
UpperCAmelCase_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
UpperCAmelCase_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 78 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """blip_2_vision_model"""
def __init__( self :List[Any] , lowercase_ :List[str]=14_08 , lowercase_ :int=61_44 , lowercase_ :List[str]=39 , lowercase_ :List[Any]=16 , lowercase_ :Union[str, Any]=2_24 , lowercase_ :List[str]=14 , lowercase_ :Dict="gelu" , lowercase_ :Tuple=0.0_0_0_0_1 , lowercase_ :Dict=0.0 , lowercase_ :Optional[int]=1E-10 , lowercase_ :Optional[Any]=True , **lowercase_ :List[str] , )-> int:
super().__init__(**lowercase_ )
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
A__ = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , lowercase_ :Union[str, os.PathLike] , **lowercase_ :Optional[Any] )-> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
A__, A__ = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
A__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase_ , **lowercase_ )
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """blip_2_qformer"""
def __init__( self :Dict , lowercase_ :int=3_05_22 , lowercase_ :Union[str, Any]=7_68 , lowercase_ :Tuple=12 , lowercase_ :Union[str, Any]=12 , lowercase_ :str=30_72 , lowercase_ :Any="gelu" , lowercase_ :Optional[int]=0.1 , lowercase_ :List[str]=0.1 , lowercase_ :Dict=5_12 , lowercase_ :Optional[int]=0.0_2 , lowercase_ :Dict=1E-12 , lowercase_ :List[Any]=0 , lowercase_ :str="absolute" , lowercase_ :List[str]=2 , lowercase_ :Tuple=14_08 , **lowercase_ :Tuple , )-> List[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = cross_attention_frequency
A__ = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls :int , lowercase_ :Union[str, os.PathLike] , **lowercase_ :str )-> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
A__, A__ = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
A__ = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase_ , **lowercase_ )
class UpperCAmelCase(UpperCamelCase__):
    """Composite BLIP-2 configuration bundling vision, Q-Former and text configs.

    The mangled original had duplicate ``__init__`` parameter names (a
    SyntaxError), lost every ``self.*`` assignment target, and ``to_dict``
    returned an undefined ``output``. Targets reconstructed from their usages
    below (e.g. ``self.text_config`` is read right after assignment, and
    ``self.__class__.model_type`` is read in ``to_dict``).
    """

    # `model_type` is read via self.__class__.model_type in to_dict below.
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        """Build the composite config; missing sub-configs fall back to defaults."""
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        # The text backbone defaults to OPT when no model_type is provided.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate from three already-built sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize this instance (and nested sub-configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 440 | 0 |
def _snake_case ( __snake_case ) -> str:
'''simple docstring'''
return " ".join(
"".join(word[::-1] ) if len(__snake_case ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original called undefined `reverse_long_words`; the function defined
    # above is `_snake_case`.
    print(_snake_case('''Hey wollef sroirraw'''))
| 715 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import registry: maps submodule name -> public names it exposes.
# The mangled original assigned every mutation to a throwaway variable and then
# passed the never-defined `_import_structure` to _LazyModule (a NameError).
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 455 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index where `item` can be inserted keeping order.

    The mangled original repeated one parameter name four times (a
    SyntaxError); names restored from their usages in the body, and the
    function renamed to `bisect_left`, the name the insort helper below calls.

    Args:
        sorted_collection: Ascending-sorted list to search.
        item: Value to place.
        lo: Lower search bound (inclusive).
        hi: Upper search bound (exclusive); a negative value means len(sorted_collection).
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index where `item` can be inserted keeping order.

    Mirrors `bisect_left` above but uses `<=` so equal elements stay to the
    left of the insertion point. Duplicate-parameter SyntaxError fixed and the
    name restored to the one the insort helper below references.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, before any equal items.

    Uses the stdlib `bisect` module (already imported at file top) after
    normalizing the `-1` sentinel, which the stdlib does not understand.
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, after any equal items.

    Stdlib-backed counterpart of `insort_left`; `-1` means "up to the end".
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return the index of `item` or None.

    Renamed from the mangled placeholder to `binary_search`, the name the
    script entry point at the bottom of this module calls; local names
    restored from their usages in the original body.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search implemented on top of the stdlib `bisect` module.

    Returns the index of `item`, or None when it is absent. (Name
    reconstructed from the module's upstream algorithm collection — it is
    not referenced elsewhere in this file.)
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over `sorted_collection[left:right + 1]`.

    The original's recursive calls already used the name
    `binary_search_by_recursion`, which the mangled `def` line did not define —
    a NameError on the first recursive step; the definition is restored to
    match its own call sites.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive driver: read a comma-separated list, sort it, and search.
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = sorted(int(item) for item in user_input.split(''','''))
    target = int(input('''Enter a single number to be found in the list:\n'''))
    # The original called the never-defined name `binary_search`; that function
    # is defined above.
    result = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 549 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """Builds tiny RegNet configs and dummy inputs for the unit tests below.

    The mangled original was named `a` with every parameter collapsed to one
    repeated name (a SyntaxError) and every `self.*` target lost; names are
    restored from their usages (the test class below instantiates
    `RegNetModelTester(self)` and reads `model_tester.num_stages`, etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # mutable defaults kept: read-only here
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by test_hidden_states_output.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random dummy tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a minimal RegNetConfig matching the tester's dimensions."""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and check the feature-map shape."""
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by ModelTesterMixin: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Unit tests for RegNet. RegNet does not use input_ids, inputs_embeds,
    attention_mask or seq_length, so several common tests are skipped or
    overridden. The mangled original used non-`test_*` method names (so
    unittest would never discover them) and duplicated closure parameter
    names (a SyntaxError); names restored from the upstream RegNet test file.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): the mangled source showed only four anonymous `False`
    # flags; these names follow the upstream test file — verify against it.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # RegNet's config has no common text properties to check.
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Normalization layers must start as identity (weight 1, bias 0).
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # embeddings + one hidden state per stage
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration test below.

    Renamed from the mangled placeholder: the integration test already calls
    it as `prepare_img()`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained RegNet classification head."""

    @cached_property
    def default_image_processor(self):
        # Name restored: the test below reads `self.default_image_processor`.
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4_180, -1.5_051, -3.4_836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 549 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the installed torch package, computed once at import time.
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version, operation, requirement_version):
    """Compare a library's version (or a Version) against a requirement.

    The mangled original repeated one parameter name three times (a
    SyntaxError); the function is renamed to `compare_versions`, the name the
    helper below already calls.

    Args:
        library_or_version: Installed library name (str) or a parsed Version.
        operation: One of the keys of STR_OPERATION_TO_FUNC (e.g. ">=").
        requirement_version: Version string to compare against.

    Raises:
        ValueError: If `operation` is not a supported comparator.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''')
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))
def is_torch_version(operation, version):
    """Compare the installed torch version against `version` using `operation`.

    Self-contained fix for the mangled original, which repeated its parameter
    name and forwarded three identical arguments; the torch version is parsed
    here so this function does not depend on de-mangled module globals.
    """
    return compare_versions(parse(importlib.metadata.version('torch')), operation, version)
| 76 |
import collections
import importlib.util
import os
import re
from pathlib import Path
__lowerCAmelCase : int = 'src/transformers'
# Matches is_xxx_available()
__lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__lowerCAmelCase : int = re.compile(R'^\s*try:')
# Catches a line with else:
__lowerCAmelCase : Tuple = re.compile(R'^\s*else:')
def find_backend(line):
    """Find one (or multiple) backend names in a line of an init file.

    Returns the sorted backends joined by "_and_", or None when the line is
    not an `if not is_xxx_available()` guard. Renamed to `find_backend`, the
    name `parse_init` below already calls.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse an __init__.py into its two halves.

    Returns a pair of dicts mapping backend name ("none" for unconditional
    objects) to the list of objects declared in the `_import_structure` part
    and in the TYPE_CHECKING part respectively, or None for traditional inits.

    The mangled original collapsed every local assignment target to one
    symbol while usages kept the real names (`lines`, `line_index`, `objects`,
    `backend`, ...); the targets are restored from those usages.
    """
    with open(init_file, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("""_import_structure = {"""):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """)])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """) if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(""" """ * 8 + """\""""):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING"""):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(""", """)
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(""", """)
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(""" """ * 8 + """\""""):
                    objects.append(line[9:-3])
                elif line.startswith(""" """ * 12 + """\""""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("""else""")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """))
        elif line.startswith(""" """ * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """))
                elif line.startswith(""" """ * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init parsed by `parse_init`.

    Returns a list of human-readable error strings; empty when both halves
    declare the same backends and objects. The mangled original declared two
    parameters with the same name (a SyntaxError); names restored from their
    usages in the body, and the function renamed to `analyze_results`, the
    name `check_all_inits` below calls.
    """
    def find_duplicates(seq):
        # Objects listed more than once in the same half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = """base imports""" if key == """none""" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    """Check every __init__.py under PATH_TO_TRANSFORMERS for consistency.

    Raises ValueError listing every init whose _import_structure half and
    TYPE_CHECKING half disagree. The mangled original took no parameters yet
    walked an undefined name; it now walks PATH_TO_TRANSFORMERS, and local
    names are restored from their usages (`fname`, `errors`, `failures`).
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, """__init__.py""")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(errors))
    if len(failures) > 0:
        raise ValueError("""\n\n""".join(failures))
def get_transformers_submodules():
    """List the submodules of transformers found under PATH_TO_TRANSFORMERS.

    The mangled original took no parameters yet walked an undefined name; it
    now walks PATH_TO_TRANSFORMERS, with locals restored from their usages.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_"""):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("""*.py"""))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, """.""")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(""".py""", """""").replace(os.path.sep, """.""")
            # Only keep top-level modules (no dots once the suffix is removed).
            if len(submodule.split(""".""")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately excluded from the registration check below; the
# function `check_submodules` references this name.
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def check_submodules():
    """Verify every submodule is registered in the main transformers init.

    Loads the real transformers __init__ from PATH_TO_TRANSFORMERS and
    compares its `_import_structure` keys against the discovered submodules,
    ignoring IGNORE_SUBMODULES. Locals restored from their usages (`spec`,
    `transformers`, `module_not_registered`, `list_of_modules`).
    """
    spec = importlib.util.spec_from_file_location(
        """transformers""",
        os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = """\n""".join(f'''- {module}''' for module in module_not_registered)
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            f'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."""
        )
if __name__ == "__main__":
    # Entry point: validate every __init__'s two halves agree, then verify
    # every submodule is registered in the main init.
    check_all_inits()
    check_submodules()
| 76 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Fixture data for the demo below; the __main__ block references these names.
# The mangled original assigned both tuples to the same variable, losing the
# first one.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A single singly-linked-list cell.

    The mangled original named the class `__magic_name__` (immediately
    shadowed by the next class) and gave both fields the same name, leaving a
    one-field dataclass; field names restored from their usages
    (`node.data` / `node.next_node` in the iterator below).
    """

    data: int
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list that stores its integers in ascending order.

    Renamed from the shadowing `__magic_name__`: the __main__ block below
    already references `SortedLinkedList`.
    """

    def __init__(self, ints) -> None:
        """Build the list from any iterable of ints, sorted ascending."""
        self.head: Node | None = None
        # Insert in descending order so each push-front lands in sorted order.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self):
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Number of stored integers (walks the whole list)."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render as e.g. "1 -> 2 -> 3"."""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one, sll_two):
    """Merge two SortedLinkedLists into a new sorted one.

    Renamed from the mangled placeholder (the __main__ block calls
    `merge_lists`); the original also repeated one parameter name twice,
    a SyntaxError.
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # SSL is just a short alias for the class, as in the original demo.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 358 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar('''T''')
class __magic_name__ ( Generic[T] ):
def __init__( self : int ,_UpperCAmelCase : bool = True ):
_a : dict[T, list[T]] = {} # dictionary of lists
_a : Tuple = directed
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : T ,_UpperCAmelCase : T ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
self.adj_list[destination_vertex].append(_UpperCAmelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
_a : Optional[Any] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_UpperCAmelCase )
_a : Union[str, Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
_a : Union[str, Any] = [destination_vertex]
_a : Optional[int] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
_a : int = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
_a : Tuple = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
_a : Tuple = [destination_vertex]
_a : str = []
return self
    def __repr__( self : int ):
        """Return a pretty-printed (pprint.pformat) view of the adjacency mapping."""
        return pformat(self.adj_list )
| 358 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module logger; the formatter class below logs device-fallback warnings through it.
logger = get_logger()

# Lazily-built map of device string id -> jaxlib device. Kept as a module global
# because `jaxlib.xla_extension.Device` is not picklable (see the class below).
DEVICE_MAPPING: Optional[dict] = None
class a (TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter converting Arrow-backed rows/columns/batches into JAX arrays.

    `device` is a string identifier (never a `jaxlib` Device object, which is
    not picklable); `jnp_array_kwargs` are forwarded to `jnp.array`.
    """

    def __init__(self, features=None, device: Optional[str] = None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> dict:
        # Map each available device to its stable string identifier.
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one array; else pass through."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert a single leaf value to a jax array on the configured device."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize one element of a nested structure (torch tensors, ndarrays, lists)."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Recursively tensorize every leaf of `data_struct`."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        """Format one Arrow row as a mapping of jax arrays."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "jax.Array":
        """Format one Arrow column as a (possibly stacked) jax array."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        """Format an Arrow batch as a mapping of column name -> jax array."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 716 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache over hashable keys.

    A deque keeps the keys in recency order (most recent on the left); a set
    mirrors the deque so membership tests are O(1).
    """

    dq_store: deque[T]  # Cache store of keys, most recently used first
    key_reference: set[T]  # References of the keys in cache (mirror of dq_store)
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create a cache holding at most `n` keys (n == 0 means effectively unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a use of `x`, evicting the least recently used key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Evict the least recently used key (right end of the deque).
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: pull it out so it can move to the front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Exercise the cache: capacity 4, re-referencing "A" refreshes it,
    # inserting 5 evicts the least recently used key (2).
    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 424 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared sentencepiece BPE fixture used by the tokenizer tests below.
# NOTE(review): upstream names this constant `SAMPLE_VOCAB` — renaming it would
# require updating the references inside the test class too.
_lowerCAmelCase: str = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowercase_ (TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BartphoTokenizer, backed by a tiny BPE fixture."""

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Write a tiny monolingual vocab file into the temp dir for the tokenizer.
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        # `_lowerCAmelCase` is the module-level sentencepiece fixture path (upstream: SAMPLE_VOCAB).
        tokenizer = BartphoTokenizer(_lowerCAmelCase, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved by setUp, with the test's special tokens applied."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-output pair used by the shared mixin tests."""
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(_lowerCAmelCase, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        # Out-of-vocab pieces map to the <unk> id (3) after the special-token offset.
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 20 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger for this file.
lowercase_ = logging.get_logger(__name__)
class a_ (BaseImageProcessor):
    r"""ConvNeXT-style image processor.

    Resizing below 384px upsamples the shortest edge by ``1/crop_pct`` and then
    center-crops; at 384px or larger the image is warped directly. Rescale and
    normalize steps follow, and everything is packed into a `BatchFeature`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` per `size["shortest_edge"]`, crop-pct style below 384px."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Run the configured resize/rescale/normalize pipeline over `images`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized on purpose: the unparenthesized form bound as
        # `(do_resize and size is None) or resample is None`.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 314 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config.json locations for the public REALM checkpoints.
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        # fixed: URL previously read ".../aresolve/main/..."
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase (PretrainedConfig):
    """Configuration for REALM models (embedder/encoder/scorer/reader/retriever)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 715 |
# Size of the alphabet: strings are hashed as base-256 numbers.
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str, alphabet_size: int = 256, modulus: int = 1_000_003) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes.

    `alphabet_size` and `modulus` default to the module constants; they are
    exposed as parameters so the hash base/modulus can be tuned without
    touching the globals (backward compatible).
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        # Hash match is only a candidate; confirm with a direct comparison.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Exercise rabin_karp on matching and non-matching pattern/text pairs."""
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5) non-ASCII characters hash correctly too
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 675 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Register the repo's custom markers so pytest does not warn about them."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    """Expose the shared transformers pytest CLI options (e.g. --make-reports)."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Write the --make-reports summary files when the option was passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        # `make_reports` carries the report id, not the reporter itself.
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """Map pytest's 'no tests collected' exit code to success for CI."""
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

# Keep a handle on the stock checker so the subclass below can delegate to it.
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(doctest.OutputChecker):
    """Doctest output checker that honours the custom IGNORE_RESULT flag."""

    def check_output(self, want, got, optionflags):
        # When the example is marked IGNORE_RESULT, accept any output.
        if IGNORE_RESULT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
# Install the HF-aware doctest machinery: custom checker, collection module
# and parser. NOTE(review): assumes pytest has already imported `_pytest.doctest`
# (it does when running under pytest) — confirm if reused elsewhere.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from sympy.functions import * # noqa: F403
def lowerCAmelCase_ ( lowercase: str , lowercase: complex , lowercase: str = "x" , lowercase: float = 10**-10 , lowercase: int = 1 , ) -> complex:
'''simple docstring'''
_UpperCamelCase: Any = symbols(lowercase )
_UpperCamelCase: str = lambdify(lowercase , lowercase )
_UpperCamelCase: str = lambdify(lowercase , diff(lowercase , lowercase ) )
_UpperCamelCase: Optional[int] = starting_point
while True:
if diff_function(lowercase ) != 0:
_UpperCamelCase: int = prev_guess - multiplicity * func(lowercase ) / diff_function(
lowercase )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCamelCase: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 271 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : str=0.999 , UpperCamelCase : Union[str, Any]="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_a = []
for i in range(UpperCamelCase ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase ) / alpha_bar_fn(UpperCamelCase ) , UpperCamelCase ) )
return torch.tensor(UpperCamelCase , dtype=torch.floataa )
class A ( _a ,_a ):
lowercase_ = [e.name for e in KarrasDiffusionSchedulers]
lowercase_ = 2
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 10_00 , lowerCAmelCase_ : float = 0.0_0_0_8_5 , lowerCAmelCase_ : float = 0.0_1_2 , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase_ : str = "epsilon" , lowerCAmelCase_ : Optional[bool] = False , lowerCAmelCase_ : Optional[bool] = False , lowerCAmelCase_ : float = 1.0 , lowerCAmelCase_ : str = "linspace" , lowerCAmelCase_ : int = 0 , ) -> Dict:
"""simple docstring"""
if trained_betas is not None:
_a = torch.tensor(lowerCAmelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(lowerCAmelCase_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
_a = betas_for_alpha_bar(lowerCAmelCase_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a = use_karras_sigmas
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=None ) -> Optional[int]:
"""simple docstring"""
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(lowerCAmelCase_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
_a = self.index_for_timestep(lowerCAmelCase_ )
_a = self.sigmas[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None , lowerCAmelCase_ : Optional[int] = None , ) -> int:
"""simple docstring"""
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase_ , dtype=lowerCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , lowerCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(lowerCAmelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = np.log(lowerCAmelCase_ )
_a = np.interp(lowerCAmelCase_ , np.arange(0 , len(lowerCAmelCase_ ) ) , lowerCAmelCase_ )
if self.config.use_karras_sigmas:
_a = self._convert_to_karras(in_sigmas=lowerCAmelCase_ , num_inference_steps=self.num_inference_steps )
_a = np.array([self._sigma_to_t(lowerCAmelCase_ , lowerCAmelCase_ ) for sigma in sigmas] )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ )
_a = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.from_numpy(lowerCAmelCase_ )
_a = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCAmelCase_ ).startswith('''mps''' ):
# mps does not support float64
_a = timesteps.to(lowerCAmelCase_ , dtype=torch.floataa )
else:
_a = timesteps.to(device=lowerCAmelCase_ )
# empty dt and derivative
_a = None
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
_a = np.log(lowerCAmelCase_ )
# get distribution
_a = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = log_sigmas[low_idx]
_a = log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = np.clip(lowerCAmelCase_ , 0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.reshape(sigma.shape )
return t
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Dict ) -> torch.FloatTensor:
"""simple docstring"""
_a = in_sigmas[-1].item()
_a = in_sigmas[0].item()
_a = 7.0 # 7.0 is the value used in the paper
_a = np.linspace(0 , 1 , lowerCAmelCase_ )
_a = sigma_min ** (1 / rho)
_a = sigma_max ** (1 / rho)
_a = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
return self.dt is None
def step(
    self,
    model_output: Union[torch.FloatTensor, np.ndarray],
    timestep: Union[float, torch.FloatTensor],
    sample: Union[torch.FloatTensor, np.ndarray],
    return_dict: bool = True,
) -> "Union[SchedulerOutput, Tuple]":
    """Propagate the sample one timestep with Heun's 2nd-order method.

    First call at a timestep runs the 1st-order (Euler) stage and buffers
    ``prev_derivative``/``dt``/``sample`` on ``self``; the second call averages
    the derivatives (Heun) and clears the buffers.

    The previous signature declared all three tensor parameters with the same
    obfuscated name (a SyntaxError); names restored from the body's use-sites.

    Returns a ``SchedulerOutput`` (or a 1-tuple when ``return_dict=False``).
    """
    step_index = self.index_for_timestep(timestep)

    # Advance the per-timestep index counter by 1 (used by exp-beta schedules,
    # e.g. pipeline_shap_e.py).
    timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
    self._index_counter[timestep_int] += 1

    if self.state_in_first_order:
        sigma = self.sigmas[step_index]
        sigma_next = self.sigmas[step_index + 1]
    else:
        # 2nd order / Heun's method
        sigma = self.sigmas[step_index - 1]
        sigma_next = self.sigmas[step_index]

    # Currently only gamma=0 is supported. This usually works best anyways.
    # We can support gamma in the future but then need to scale the timestep
    # before passing it to the model, which requires a change in API.
    gamma = 0
    sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

    # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
    if self.config.prediction_type == "epsilon":
        sigma_input = sigma_hat if self.state_in_first_order else sigma_next
        pred_original_sample = sample - sigma_input * model_output
    elif self.config.prediction_type == "v_prediction":
        sigma_input = sigma_hat if self.state_in_first_order else sigma_next
        pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
            sample / (sigma_input**2 + 1)
        )
    elif self.config.prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(
            f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
        )

    if self.config.clip_sample:
        pred_original_sample = pred_original_sample.clamp(
            -self.config.clip_sample_range, self.config.clip_sample_range
        )

    if self.state_in_first_order:
        # 2. Convert to an ODE derivative for the 1st-order step
        derivative = (sample - pred_original_sample) / sigma_hat
        # 3. delta timestep
        dt = sigma_next - sigma_hat

        # store for the 2nd-order step
        self.prev_derivative = derivative
        self.dt = dt
        self.sample = sample
    else:
        # 2. 2nd order / Heun's method: average the two derivatives
        derivative = (sample - pred_original_sample) / sigma_next
        derivative = (self.prev_derivative + derivative) / 2

        # 3. take prev timestep & sample
        dt = self.dt
        sample = self.sample

        # Free dt and derivative: this puts the scheduler back in "first order mode".
        self.prev_derivative = None
        self.dt = None
        self.sample = None

    prev_sample = sample + derivative * dt

    if not return_dict:
        return (prev_sample,)

    return SchedulerOutput(prev_sample=prev_sample)
def add_noise(
    self,
    original_samples: torch.FloatTensor,
    noise: torch.FloatTensor,
    timesteps: torch.FloatTensor,
) -> torch.FloatTensor:
    """Forward-diffuse `original_samples` to the noise levels of `timesteps`:
    ``noisy = original + noise * sigma(t)``.

    The previous signature declared all three parameters with the same
    obfuscated name (a SyntaxError); names restored from the body's use-sites.
    """
    # Make sure sigmas and timesteps live on the same device/dtype as the samples.
    sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
    if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
        # mps does not support float64
        schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
        timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
    else:
        schedule_timesteps = self.timesteps.to(original_samples.device)
        timesteps = timesteps.to(original_samples.device)

    step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

    sigma = sigmas[step_indices].flatten()
    # Right-pad sigma with singleton dims so it broadcasts over the sample shape.
    while len(sigma.shape) < len(original_samples.shape):
        sigma = sigma.unsqueeze(-1)

    noisy_samples = original_samples + noise * sigma
    return noisy_samples
def __len__(self) -> int:
    """Length of the scheduler: the configured number of training timesteps.

    (The previous ``-> List[str]`` return annotation was wrong; ``__len__``
    must return an int.)
    """
    return self.config.num_train_timesteps
| 377 |
"""Breadth-first search on an unweighted graph: shortest path and shortest distance."""
from collections import deque

# Small demo graph used by the __main__ examples below.
# (The __main__ block references this by name; the previous obfuscated binding
# left `demo_graph` undefined.)
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}

# Backward-compatible alias for the previous obfuscated module-level name.
_snake_case = demo_graph


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from `start` to `goal` as a list of nodes.

    Returns ``[]`` when `goal` is unreachable; ``[start]`` when start == goal.
    """
    explored = set()
    # Queue of partial paths still to be extended (FIFO => BFS).
    queue = deque([[start]])

    if start == goal:
        return [start]

    while queue:
        path = queue.popleft()  # deque.popleft() is O(1); list.pop(0) was O(n)
        node = path[-1]
        if node not in explored:
            # Extend the path by every neighbour and enqueue the new paths.
            for neighbour in graph[node]:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                if neighbour == goal:
                    return new_path
            explored.add(node)

    # No path between the two nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest `start` -> `target` path.

    Returns -1 when the graph is empty, either endpoint is missing, or
    `target` is unreachable; 0 when start == target.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = deque([start])
    visited = set(queue)
    # Distance of each discovered node from `start`; target starts unknown (-1).
    dist = {start: 0, target: -1}
    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


# Backward-compatible alias: the obfuscated name's last binding was the distance function.
snake_case_ = bfs_shortest_path_distance

if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
| 377 | 1 |
"""Lazy-loading package init for the Wav2Vec2-with-LM processor."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps submodule name -> public names it exports (consumed by _LazyModule below;
# the previous obfuscated binding left `_import_structure` undefined at use time).
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first
    # attribute access (the dead `import sys` grounds this assignment).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 467 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCAmelCase_ ( snake_case_ : str ) ->str:
return "".join(sorted(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_ : str ) ->list[str]:
return word_by_signature[signature(snake_case_ )]
lowerCAmelCase = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
lowerCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
lowerCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams)) | 174 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    """Round-trip and input-handling tests for VisionTextDualEncoderProcessor.

    Reconstructed: every method previously shared one obfuscated name (so only
    the last survived) and `self.tmpdirname` / `self.vocab_file` /
    `self.image_processor_file` were read but never assigned.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random 3x30x400 uint8 image, converted to a channels-last PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)


# Backward-compatible alias for the previous obfuscated class name.
lowerCAmelCase = VisionTextDualEncoderProcessorTest
from math import factorial, radians
def snake_case__(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a Maclaurin series.

    :param angle_in_degrees: angle in degrees (any magnitude; reduced mod 360).
    :param accuracy: number of series terms added after the leading term.
    :param rounded_values_count: decimal places of the returned value.

    (The previous signature declared all three parameters with the same
    obfuscated name — a SyntaxError; names restored from the body's use-sites.)
    """
    # Reduce the angle into [0, 360) so the series converges quickly.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    # Terms alternate sign and skip even powers: x - x^3/3! + x^5/5! - ...
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('doctest').testmod() | 304 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# These three constants are read by the tokenizer class below
# (vocab_files_names / pretrained_vocab_files_map / max_model_input_sizes);
# previously they were all bound to one obfuscated placeholder, leaving the
# names referenced below undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}

# Backward-compatible alias (the obfuscated name's last binding).
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX tokenizer (byte-level BPE backed by `tokenizers`).

    Reconstructed: the previous base class name was undefined (the real base,
    PreTrainedTokenizerFast, is imported above) and both methods shared one
    obfuscated name, making the vocabulary-saving method unreachable.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer when its serialized add_prefix_space
        # setting disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Serialize the backend tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        """Flatten a Conversation into input ids, eos-terminated per turn and
        truncated (from the left) to `model_max_length`."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids


# Backward-compatible alias for the previous obfuscated class name.
snake_case__ = GPTNeoXTokenizerFast
| 528 | """simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU.
    # This makes JAX allocate exactly what is needed on demand, and deallocate
    # memory that is no longer needed — but will be slower, as stated here:
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # (Previously this value was bound to a dead variable instead of the env
    # var, so the allocator setting never took effect.)
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
    SCREAMING_SNAKE_CASE__ = "platform"  # preserved obfuscated binding

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard encoder/decoder input dict for Blenderbot-small tests.

    The previous signature declared all eight parameters with the same name
    (a SyntaxError); names restored from the body's use-sites and the call
    site ``prepare_blenderbot_inputs_dict(...)`` below.
    """
    # Default masks: 1 everywhere except at pad tokens.
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder mask is reused here (matching the original
        # return); the computed decoder mask is discarded — confirm upstream
        # before changing.
        "decoder_attention_mask": attention_mask,
    }


# Backward-compatible alias for the previous obfuscated name.
_lowerCamelCase = prepare_blenderbot_inputs_dict
class FlaxBlenderbotSmallModelTester:
    """Builds tiny configs/inputs and shared cache checks for the Flax
    Blenderbot-small tests.

    Reconstructed: ``__init__`` previously bound every argument to a throwaway
    local instead of ``self.*`` (read by the other methods), and all four
    methods shared one obfuscated name. Method names are fixed by the calls in
    the test class below (``prepare_config_and_inputs``,
    ``prepare_config_and_inputs_for_common``, ``check_use_cache_forward``,
    ``check_use_cache_forward_with_attn_mask``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Random ids in [3, vocab_size), with eos (2) appended on every row.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # caching is exercised explicitly below — TODO confirm upstream value
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        # Cached incremental decoding must match full decoding on the last position.
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the decoder mask out to the full cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


# Backward-compatible alias for the previous obfuscated class name.
snake_case__ = FlaxBlenderbotSmallModelTester
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Standalone shape/shift checks for the conditional-generation head.

    Reconstructed: the class attribute is ``vocab_size`` (read below as
    ``self.vocab_size``) and the four methods previously shared one obfuscated
    name, so only the last was ever defined/discovered by unittest.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        # 13 eos-terminated (2) sequences of length 7; one row contains padding (1).
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad token (the decoder-start token replaces it).
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


# Backward-compatible alias for the previous obfuscated class name.
snake_case__ = BlenderbotHeadTests
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Common Flax model-tester suite plus jitted encode/decode checks.

    Reconstructed: the previous base list named one undefined class twice
    (duplicate bases — a TypeError at class creation); the real mixins are the
    ones imported at the top of this file. ``setUp`` previously bound the
    tester to a throwaway local instead of ``self.model_tester``, which every
    test method reads.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)


# Backward-compatible alias for the previous obfuscated class name.
snake_case__ = FlaxBlenderbotSmallModelTest
| 528 | 1 |
"""Check whether a sequence of brackets is balanced."""


def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is opened and closed in the right order.

    Non-bracket characters are ignored. Name restored from the call site in
    ``main`` below (the two functions previously shared one obfuscated name).
    """
    stack = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (not stack or open_to_closed[stack.pop()] != ch):
            # Closing bracket with nothing open, or a mismatched pair.
            return False

    # Balanced iff every opener was consumed.
    return not stack


def main() -> None:
    """Prompt for a bracket sequence and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


# Backward-compatible alias (the obfuscated name's last binding was the entry point).
_UpperCamelCase = main

if __name__ == "__main__":
    main()
"""Minimal path sum through a matrix (Project Euler 82 style): enter anywhere in
the left column, exit anywhere in the right column, moving up, down and right."""
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum of the comma-separated matrix in `filename`.

    `filename` is resolved relative to this file (absolute paths pass through
    ``os.path.join`` unchanged). Name restored from the ``__main__`` call below;
    the previous body bound every local to one obfuscated placeholder, leaving
    ``matrix``/``rows``/``cols``/``minimal_path_sums`` undefined at their uses.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    # minimal_path_sums[i][j]: cheapest cost to reach cell (i, j); the left
    # column is free to enter, so it seeds the DP.
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    # Process column by column: extend rightwards, then relax downwards and upwards.
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


# Backward-compatible alias for the previous obfuscated name.
_UpperCamelCase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    """Tests for GradientAccumulator, single-device and under MirroredStrategy.

    Reconstructed: all three methods previously shared one obfuscated name (so
    only the last was discovered), the helper's parameters did not match the
    names used in its body, and every local was bound to a placeholder.
    ``assertListAlmostEqual`` is fixed by its call sites below.
    """

    def assertListAlmostEqual(self, list1, list2, tol):
        """Element-wise almost-equal check for two equal-length lists."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # A call with a different number of gradients must be rejected.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        # Reset eager context so the logical-device configuration below sticks.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            # Split the single CPU into two logical devices for MirroredStrategy.
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])


# Backward-compatible alias for the previous obfuscated class name.
UpperCamelCase_ = OptimizationFTest
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff 2**p - 1 is a Mersenne prime (Lucas-Lehmer test).

    Only meaningful for prime exponents ``p``; ``p == 2`` is special-cased.

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


# Backward-compatible alias for the previous generated name.
UpperCAmelCase__ = lucas_lehmer_test


if __name__ == "__main__":
    print(lucas_lehmer_test(7))   # True: 2**7 - 1 = 127 is prime
    print(lucas_lehmer_test(11))  # False: 2**11 - 1 = 2047 = 23 * 89
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MGP-STR, driven by the shared TokenizerTesterMixin."""

    # Attribute names below are the ones TokenizerTesterMixin reads.
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
# Boyer-Moore substring search using the bad-character heuristic.
from __future__ import annotations


class BoyerMooreSearch:
    """Search ``pattern`` in ``text`` with the bad-character shift rule."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for an alignment
        starting at ``current_pos``, or -1 when the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return all start positions where the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


# Backward-compatible alias for the previous generated class name.
__magic_name__ = BoyerMooreSearch


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def snake_case_(A_):
    """Decorator factory: tag the decorated function as the handler for key ``A_``.

    The key is appended to the function's ``handle_key`` list attribute, which
    the key-handling metaclass below collects into its dispatch table.
    """
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [A_]
        setattr(func, 'handle_key', handle)
        return func

    return decorator
def snake_case_(*A_):
    """Decorator factory: tag the decorated function as the handler for every
    key in ``A_`` (variadic variant of the single-key decorator above)."""
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += A_
        setattr(func, 'handle_key', handle)
        return func

    return decorator
class __snake_case(__UpperCamelCase):
    """Metaclass-style helper that collects methods tagged with a ``handle_key``
    attribute (via the decorators above) into a per-class ``key_handler`` dict.

    NOTE(review): the base ``__UpperCamelCase`` is not defined in this file;
    upstream this class derives from ``type`` — confirm.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        # Expose the input dispatcher on every class built through this metaclass.
        setattr(new_cls, 'handle_input', __snake_case.SCREAMING_SNAKE_CASE)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                # Map each registered key to its handler method.
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def SCREAMING_SNAKE_CASE(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def snake_case_(cls):
    """Rebuild ``cls`` through the key-handling metaclass defined above so its
    decorated methods are registered in ``key_handler``."""
    return __snake_case(cls.__name__, cls.__bases__, cls.__dict__.copy())
import gc
import threading
import time
import psutil
import torch
class lowerCAmelCase:
    """Track the current process's peak RSS memory on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Busy-poll RSS until ``stop()`` flips ``peak_monitoring`` off."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Begin monitoring on a daemon thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop monitoring and return the peak RSS observed, in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


# Keep the upstream class name available alongside the generated one.
PeakCPUMemory = lowerCAmelCase

# Module-level tracker; bind both the generated name and the name that the
# measure helpers below actually reference.
_UpperCAmelCase = cpu_peak_tracker = lowerCAmelCase()
def SCREAMING_SNAKE_CASE():
    """Snapshot time, CPU RSS and per-GPU allocated memory, and start the
    module-level CPU peak tracker. Returns the snapshot dict."""
    # Time
    measures = {'time': time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def SCREAMING_SNAKE_CASE(start_measures):
    """Return deltas since ``start_measures`` (from the snapshot helper above):
    elapsed seconds under ``'time'``, MiB deltas for CPU/GPU usage and peaks."""
    # Time
    measures = {'time': time.time() - start_measures['time']}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def SCREAMING_SNAKE_CASE(measures, description):
    """Pretty-print the measure deltas produced by the helper above."""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowercase_:
    """Builds tiny Llama configs/inputs and runs shape checks on model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Name used by the unittest class below.
LlamaModelTester = lowercase_
@require_torch
class lowercase_(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ModelTester-driven unit tests for the Llama family."""

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class lowercase_(unittest.TestCase):
    """Slow integration tests against released Llama-2 checkpoints."""

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        # Compare the logits slice (the original compared the mean a second time).
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test")
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is curently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
# Metric documentation strings; each gets its own name so the class below
# (and its docstring decorator) can actually reference them.
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {\'spearmanr\': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results[\'spearmanr\'])
        -0.7
        >>> print(round(results[\'spearmanr_pvalue\'], 2))
        0.19
'''

_CITATION = R'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
            Haberland, Matt and Reddy, Tyler and Cournapeau, David and
            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
            Kern, Robert and Larson, Eric and Carey, C J and
            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
            Harris, Charles R. and Archibald, Anne M. and
            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
            Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_(datasets.Metric):
    """Spearman rank-order correlation metric backed by ``scipy.stats.spearmanr``."""

    def _info(self):
        # ``datasets.Metric`` requires this hook name.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float'''),
                    '''references''': datasets.Value('''float'''),
                }),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # ``datasets.Metric`` requires this hook name.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
from statistics import mean, stdev
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ = 3 ):
lowerCamelCase_ : Tuple = min(_UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = max(_UpperCAmelCase )
# normalize data
return [round((x - x_min) / (x_max - x_min) ,_UpperCAmelCase ) for x in data]
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ = 3 ):
lowerCamelCase_ : Dict = mean(_UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = stdev(_UpperCAmelCase )
# standardize data
return [round((x - mu) / (sigma) ,_UpperCAmelCase ) for x in data]
| 364 | import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the original bound "platform" to a throwaway variable; the comments
    # above describe setting the XLA allocator env var, reconstructed here.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds small random Pegasus configs/inputs and verifies cached decoding.

    NOTE(review): the class, attribute and method names were mangled in the
    original; they are reconstructed from the surviving call sites in the test
    class below (``FlaxPegasusModelTester``, ``prepare_config_and_inputs_for_common``,
    ``check_use_cache_forward``, ``check_use_cache_forward_with_attn_mask``) and
    from ``self.config_cls`` / ``self.config_updates`` reads inside this class.
    """

    config_cls = PegasusConfig   # read via self.config_cls when building configs
    config_updates = {}          # extra kwargs merged into the config via **self.config_updates
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small random (config, inputs_dict) pair for the tests."""
        # Clip to >= 3 so special token ids never appear mid-sequence, then force EOS last.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental decoding with ``init_cache`` must match one-shot decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        # Decode everything but the last token with a fresh cache...
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        # ...then feed only the last token, reusing the cache.
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as ``check_use_cache_forward`` but with an explicit padded decoder mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask with zeros out to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def UpperCAmelCase_(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the dict of numpy model inputs for a Pegasus forward pass.

    Missing masks are derived from padding: positions equal to
    ``config.pad_token_id`` are masked out, except the first decoder position,
    which is always attended (it is the decoder start token).

    NOTE(review): the original declared all five parameters with the same name
    (a SyntaxError) and used the dtype ``np.inta``, which does not exist in
    numpy; both are reconstructed here (masks as int8) from the body's own
    variable references.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                # First decoder position is always visible.
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax Pegasus model tests (config, caching, jitted encode/decode, slow integration).

    NOTE(review): the base mixin, class-attribute and ``test_*``/``setUp`` method
    names were mangled in the original (all six attributes bound one name; the
    mixin was an undefined identifier). They are reconstructed from the imported
    ``FlaxModelTesterMixin``, the unittest protocol, and the tester class above;
    confirm the boolean flags against upstream.
    """

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # unittest hook; must be named `setUp` to run before each test.
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """Jitted and non-jitted encode must produce outputs of identical shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """Jitted and non-jitted decode must produce outputs of identical shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        """End-to-end summarization check against the released pegasus-xsum weights."""
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 423 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase :List[str] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[str] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :str = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the original bound both the logger and the archive map to the
# same name, so the map clobbered the logger; distinct conventional names restored.
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__(PretrainedConfig):
    """Configuration for the GLPN depth-estimation model.

    Stores the hierarchical (SegFormer-style) encoder hyper-parameters plus the
    decoder/depth-head settings. NOTE(review): the original signature declared
    every parameter with the same name (a SyntaxError), assigned each value to a
    throwaway local instead of ``self``, and inherited from an undefined name;
    the parameter names are reconstructed from the assignment order and the
    base class from the visible ``PretrainedConfig`` import.
    """

    # Key used by the transformers config/model auto-mapping.
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 42 | 1 |
from __future__ import annotations
import time
# Path type alias: a list of (y, x) coordinates.
Path = list[tuple[int, int]]

# NOTE(review): the original rebound one throwaway name for all three constants,
# while the search classes below read `grid` and `delta`; names restored.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: a grid position, the goal position, and a parent link.

    NOTE(review): the original class was renamed to a throwaway identifier while
    the search classes below construct ``Node(...)``; the name is restored here.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Position stored (y, x), matching the grid's row-major indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent  # None for the root of the search tree
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level ``grid`` using moves in ``delta``.

    NOTE(review): class and method names restored from the surviving call sites
    (``BreadthFirstSearch(...)``, ``.search()``, ``.get_successors(...)``,
    ``.retrace_path(...)``). ``start``/``goal`` are (y, x) tuples.
    """

    def __init__(self, start, goal):
        # Node takes (pos_x, pos_y, goal_x, goal_y, parent), hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Run BFS; return the path as a list of (y, x) tuples, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            for node in self.get_successors(current_node):
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbour nodes of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links from *node* back to the root and return the forward path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Bidirectional BFS: expands one frontier from start and one from goal.

    NOTE(review): names restored from the surviving references
    (``BidirectionalBreadthFirstSearch(init, goal)``, ``self.fwd_bfs``, ``self.bwd_bfs``).
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Alternate one expansion per direction until the frontiers meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            # Each search aims at the other search's current frontier node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (dropping the shared node)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # meeting point already present in fwd_path
        bwd_path.reverse()
        return fwd_path + bwd_path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase__ : Optional[Any] = (0, 0)
lowerCamelCase__ : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase__ : Tuple = time.time()
lowerCamelCase__ : Union[str, Any] = BreadthFirstSearch(init, goal)
lowerCamelCase__ : List[Any] = bfs.search()
lowerCamelCase__ : str = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
lowerCamelCase__ : Tuple = time.time()
lowerCamelCase__ : Union[str, Any] = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase__ : Union[str, Any] = bd_bfs.search()
lowerCamelCase__ : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 12 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the original bound the logger and the archive map to the same
# name, so the second assignment clobbered the logger; distinct names restored.
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class __lowercase(PretrainedConfig):
    """Configuration for the EfficientNet image classification model.

    NOTE(review): the original declared every __init__ parameter with the same
    name (a SyntaxError), assigned values to a throwaway local instead of
    ``self``, and inherited from an undefined name. Parameter names are
    reconstructed from the assignment order; the base class comes from the
    visible ``PretrainedConfig`` import. A second, same-named class below
    shadows this one at module level — consider renaming.
    """

    # Key used by the transformers config/model auto-mapping.
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        dropout_rate=0.5,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat contributes 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4
class __lowercase(OnnxConfig):
    """ONNX export configuration for EfficientNet.

    NOTE(review): the original's two properties shared one mangled name (the
    second shadowed the first) and the base was an undefined identifier; the
    ``OnnxConfig`` API property names ``inputs`` / ``atol_for_validation`` and
    the base class (imported above) are restored. This class shadows the
    same-named config class above at module level — consider renaming.
    """

    # Minimum torch version supporting this export (OnnxConfig attribute).
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Name and dynamic-axis mapping of the model's ONNX inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1e-5
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): both constants were bound to one throwaway name; the training
# function below reads `MAX_GPU_BATCH_SIZE`, grounding these names.
MAX_GPU_BATCH_SIZE = 16  # largest per-device batch before gradient accumulation kicks in
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    NOTE(review): the original defined three functions under one mangled name and
    declared duplicate parameters (a SyntaxError); the name and the
    ``(accelerator, batch_size)`` signature are restored from the call site in
    the training function and the body's own references.

    Args:
        accelerator: the ``accelerate.Accelerator`` driving distributed setup.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32  # eval batch size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__snake_case = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on GLUE MRPC with Accelerate and evaluate per epoch.

    NOTE(review): the original rebound a single throwaway name for every local;
    variable names and the ``(config, args)`` signature are restored from the
    surviving references (``config["lr"]`` keys, ``args.cpu``,
    ``MAX_GPU_BATCH_SIZE``, ``samples_seen`` handling) and the call in ``main``.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace with `.cpu` and `.mixed_precision`.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2

    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def _lowercase ( ):
    """Command-line entry point: parse flags and launch ``training_function``.

    Flags:
      --mixed_precision {no,fp16,bf16,fp8}: mixed-precision mode for Accelerate.
      --cpu: if passed, train on the CPU.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" ,
        type=str ,
        default=None ,
        choices=["""no""", """fp16""", """bf16""", """fp8"""] ,
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    # Hyper-parameters forwarded to the training loop defined earlier in this module.
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
    # Call the entry point defined above (it was the only callable in this script).
    _lowercase()
| 707 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# Module-level logger.  The trainer below logs through the conventional name
# ``logger``; keep the original binding as an alias so any existing reference
# to ``__snake_case`` continues to resolve to the same object.
logger = logging.getLogger(__name__)
__snake_case = logger
# torch_xla is only importable on TPU hosts; guard the imports so the module
# stays usable on CPU/GPU machines.
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class UpperCAmelCase ( Trainer ):
    """Question-answering ``Trainer`` with quantization-aware training support.

    Adds three capabilities on top of the stock HuggingFace ``Trainer``:
    calibration of fake-quantization observers (via ``quant_trainer``),
    QA-style post-processing of raw predictions before metric computation,
    and export of the (quantized) model to ONNX.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """
        Args:
            eval_examples: raw (un-tokenized) evaluation examples, consumed by
                ``post_process_function`` to turn model output into answer text.
            post_process_function: callable ``(examples, features, predictions)``
                producing the inputs expected by ``compute_metrics``.
            quant_trainer_args: namespace of quantization options forwarded to
                the ``quant_trainer`` helper module.
        """
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a ``DataLoader`` over the calibration dataset.

        NOTE(review): falls back to ``self.calib_dataset``, which is never set in
        ``__init__`` — callers must assign it (or pass ``calib_dataset``); confirm.
        """
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="""Calibration""" )
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes over up to ``self.calib_num`` samples so the
        quantization observers collect activation statistics, then freeze them."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("""***** Running calibration *****""" )
        logger.info(F'  Num examples = {self.calib_num}' )
        logger.info(F'  Batch size = {calib_dataloader.batch_size}' )
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step (loss/logits/labels are not needed; the forward
            # pass alone feeds the calibration observers).
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with QA post-processing.

        Metric computation is disabled during the raw prediction loop and done
        here instead, because it needs the un-tokenized examples.
        """
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="""Evaluation""" ,
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Always restore the metric function, even if the loop raised.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F'{metric_key_prefix}_' ):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict on a test set, with the same post-processing dance as ``evaluate``."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="""Prediction""" ,
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, """predict""" )
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F'{metric_key_prefix}_' ):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to ``output_dir``/model.onnx using one
        evaluation batch as the tracing example."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("""Converting model to be onnx compatible""" )
        from pytorch_quantization.nn import TensorQuantizer

        # Switch fake-quant nodes to the FB (ONNX-exportable) implementation.
        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, """module""" ) else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, """model.onnx""" )
        logger.info(F'exporting model to {output_model_file}' )

        axes = {0: """batch_size""", 1: """seq_len"""}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] ,
            output_names=["""output_start_logits""", """output_end_logits"""] ,
            dynamic_axes={
                """input_ids""": axes,
                """attention_mask""": axes,
                """token_type_ids""": axes,
                """output_start_logits""": axes,
                """output_end_logits""": axes,
            } ,
            verbose=True,
        )
        logger.info("""onnx export finished""" )
| 181 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
# Module-level logger.  main() below logs through the conventional name
# ``logger``; keep the original binding as an alias so any existing reference
# to ``lowercase_`` continues to resolve to the same object.
logger = logging.getLogger(__name__)
lowercase_ = logger
@dataclass
class A_ :
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Using ``HfArgumentParser`` this class is turned into argparse arguments so
    they can be specified on the command line or in a JSON config file.
    """

    dataset_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default='tab_fact',
        metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the training data.'}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'}
    )
    test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'})

    def __post_init__(self):
        # Either a hub dataset name or local train+validation files must be given,
        # and local files must be csv/json with matching extensions.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
        else:
            train_extension = self.train_file.split('.' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."

    # Backward-compatible alias for the original (mangled) validator name.
    _snake_case = __post_init__


# Forward alias so that references to ``DataTrainingArguments`` resolve.
DataTrainingArguments = A_
@dataclass
class A_ :
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )


# Forward alias so that references to ``ModelArguments`` resolve.
ModelArguments = A_
def UpperCamelCase__ ( ):
    """Main entry point: fine-tune a TAPEX (BART) model on the TabFact
    table-entailment task.

    Parses model/data/training arguments, loads the dataset (hub or local
    csv/json), tokenizes statement+table pairs with ``TapexTokenizer``, then
    runs training, evaluation and/or prediction as requested.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )

        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}' )

        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=num_labels ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        use_fast=model_args.use_fast_tokenizer ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None ,
        add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,
        config=config ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )

    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            # A table is linearized as '#'-separated cells with one row per line;
            # the first line holds the column headers.
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function ,
            batched=True ,
            load_from_cache_file=not data_args.overwrite_cache ,
            desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model ,
        args=training_args ,
        train_dataset=train_dataset if training_args.do_train else None ,
        eval_dataset=eval_dataset if training_args.do_eval else None ,
        compute_metrics=compute_metrics ,
        tokenizer=tokenizer ,
        data_collator=data_collator , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )

        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )

    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )

        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n' )

    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    """Per-process entry point for TPU training via ``xla_spawn``.

    The spawn ``index`` is unused; each process runs the full entry point.
    (Renamed from a duplicate definition that shadowed the main entry point
    above under the same name.)
    """
    # For xla_spawn (TPUs)
    UpperCamelCase__()


if __name__ == "__main__":
    # Run the main entry point defined earlier in this script.
    UpperCamelCase__()
| 669 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for ``PegasusTokenizer`` (slow) and ``PegasusTokenizerFast`` (rust).

    Uses the small no-BOS SentencePiece fixture for local checks and hub
    checkpoints for the large-tokenizer tests.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(lowercase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _large_tokenizer(self ):
        # Network-backed tokenizer; cached so the download happens at most once.
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )

    def get_tokenizer(self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self ):
        """``</s>`` maps to id 1 and back."""
        token = '</s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(vocab_keys ) , 1103 )

    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def test_mask_tokens_rust_pegasus(self ):
        """Slow and fast tokenizers must agree on special mask/unk token handling."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )

    def test_large_mask_tokens(self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )

    def test_pegasus_large_tokenizer_settings(self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self ):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self ):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,
            model_name='google/bigbird-pegasus-large-arxiv' ,
            revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """BigBird-Pegasus tokenizer variant: ``offset=0``, a single ``[MASK]``
    token, and no sentence-level mask token."""

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(lowercase_ , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _large_tokenizer(self ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def get_tokenizer(self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self ):
        """Slow and fast tokenizers must agree on [MASK]/unk handling."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )

    @require_torch
    def test_large_seq2seq_truncation(self ):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self ):
        """Tokenization must match the reference ids produced by the original
        TF implementation for this example sentence."""
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str ).input_ids
        self.assertListEqual(
            token_ids ,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def SCREAMING_SNAKE_CASE__ ( emb_1 , emb_2 , eps=1E-12 ):
    """Row-wise cosine similarity between two batches of embeddings.

    Args:
        emb_1: array of shape (n, d), one embedding per row.
        emb_2: array of shape (m, d).
        eps: lower clamp on the row norms, guarding against division by zero.

    Returns:
        (n, m) array: cosine similarity of every row of `emb_1` with every
        row of `emb_2`.
    """
    # The mangled original declared all three parameters with the same name
    # (`snake_case__` — a SyntaxError) and referenced the undefined `emb_a`;
    # this restores a coherent, runnable implementation.  `eps` is passed
    # positionally to `jnp.clip` (its min bound) for jax-version portability.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


# Backward-compatible alias: downstream code in this file refers to this
# function by its upstream name.
jax_cosine_distance = SCREAMING_SNAKE_CASE__
class A_ ( nn.Module ):
    """Flax safety-checker head: a CLIP vision encoder plus projection whose
    output is scored (cosine similarity) against learned "concept" and
    "special care" embeddings to flag NSFW images.
    """

    # Dataclass fields read throughout the body.  The mangled source declared
    # both under one name (`SCREAMING_SNAKE_CASE_`), so `self.config` never
    # existed; `jnp.floataa` does not exist (mangled `jnp.float32`).
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        # flax only invokes a method literally named `setup`; the mangled
        # `__UpperCAmelCase` would never run.  `use_bias=False` replaces the
        # undefined `__A` the mangled source passed — matches the upstream
        # diffusers projection; confirm against upstream if in doubt.
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param(
            "concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        """Return a (batch,) boolean array — True where an NSFW concept fires."""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts


# Backward-compatible alias: the pretrained-model wrapper in this file refers
# to the module by its upstream name.
FlaxStableDiffusionSafetyCheckerModule = A_
class A_ ( FlaxPreTrainedModel ):
    """FlaxPreTrainedModel wrapper around the safety-checker module.

    The mangled original subclassed the undefined `UpperCAmelCase`
    (`FlaxPreTrainedModel` is the only base imported here and its
    `init_weights` hook is overridden below), declared all three class
    attributes under one name so `config_class`/`main_input_name`/
    `module_class` never existed, and gave `__init__` five parameters all
    named `__A` — a SyntaxError.
    """

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Default input shape: one NHWC 224x224 RGB image.
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialise module parameters from a random input tensor.

        Must be named `init_weights` — FlaxPreTrainedModel calls it by name
        (the mangled `__UpperCAmelCase` would never be invoked).
        """
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        """Run the safety checker; accepts NCHW input and transposes to NHWC."""
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


snake_case = logging.get_logger(__name__)


class A_ ( DonutImageProcessor ):
    """Deprecated feature-extractor shim for Donut.

    The mangled original subclassed the undefined `UpperCAmelCase` (the
    deprecation message itself says this class is an alias of
    `DonutImageProcessor`), and declared `*__A, **__A` — a duplicate
    parameter name, which is a SyntaxError.
    """

    def __init__(self, *args, **kwargs) -> None:
        # FutureWarning replaces the undefined `__A` category argument —
        # the standard category for transformers deprecation shims.
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Rayleigh quotient of a Hermitian matrix (restored from mangled source:
the original defined three colliding functions all named `A` whose bodies
referenced undefined names, and the self-test called `is_hermitian` /
`rayleigh_quotient` / `tests`, none of which existed)."""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff `matrix` equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Rayleigh quotient (v* A v) / (v* v) for matrix `a` and column vector `v`.

    Returns a 1x1 ndarray for a column-vector `v`.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Self-checks mirroring the original module's assertions."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    # (v* A v) / (v* v) = 42 / 14 = 3 for this matrix/vector pair.
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 5 |
def is_balanced(s: str) -> bool:
    """Return True iff every bracket in `s` is properly matched and nested.

    Non-bracket characters are ignored.  (Restored from mangled source: both
    functions were named `__magic_name__`, the body read the undefined `s`,
    and the module called the undefined `is_balanced`/`main`.)
    """
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            # Closing bracket with nothing open, or mismatched with the most
            # recent opener (which is popped as a side effect of the check).
            return False

    return len(stack) == 0


def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 606 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
    """Test suite for RoCBertTokenizer (vocab + word-shape + word-pronunciation files).

    NOTE(review): machine-mangled source.  Every test method below is named
    `SCREAMING_SNAKE_CASE__`, so within this class each later definition
    shadows the previous one and only the last method survives at runtime —
    the upstream `test_*` names must be restored for the suite to actually
    run.  Likewise the `lowerCamelCase` arguments and the repeated
    `UpperCamelCase` local bindings are mangled identifiers: later statements
    read the original upstream names (`tokenizer`, `vocab_tokens`,
    `tokenizer_r`, `tokens`, `expected_results`, ...) which are undefined as
    written.  Expected values and literals are preserved for reconstruction.
    """

    __SCREAMING_SNAKE_CASE = RoCBertTokenizer
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = filter_non_english

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Write a small vocab, word-shape map and word-pronunciation map
        fixture into the temp dir used by the common tokenizer tests."""
        super().setUp()
        UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        UpperCamelCase : Union[str, Any] = {}
        UpperCamelCase : List[Any] = {}
        for i, value in enumerate(lowerCamelCase ):
            UpperCamelCase : Any = i
            UpperCamelCase : List[Any] = i
        UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
        UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
            json.dump(lowerCamelCase , lowerCamelCase , ensure_ascii=lowerCamelCase )
        with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
            json.dump(lowerCamelCase , lowerCamelCase , ensure_ascii=lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        """Tokenize a Chinese sentence and map tokens to id/shape/pronunciation ids."""
        UpperCamelCase : List[str] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCamelCase : Dict = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(lowerCamelCase , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Basic tokenizer splits CJK characters out of Latin text."""
        UpperCamelCase : Dict = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Lower-casing basic tokenizer (accents stripped by default)."""
        UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Lower-casing with accent stripping disabled keeps the accents."""
        UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        """Lower-casing with accent stripping enabled removes the accents."""
        UpperCamelCase : Any = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Default lower-casing behaviour strips accents."""
        UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        """Case-preserving basic tokenizer."""
        UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Case-preserving with accent stripping disabled."""
        UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Case-preserving with accent stripping enabled."""
        UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , strip_accents=lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        """never_split tokens are kept intact by the basic tokenizer."""
        UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        """WordPiece tokenizer splits into subwords and falls back to [UNK]."""
        UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        UpperCamelCase : List[str] = {}
        for i, token in enumerate(lowerCamelCase ):
            UpperCamelCase : str = i
        UpperCamelCase : int = RoCBertWordpieceTokenizer(vocab=lowerCamelCase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        """_is_whitespace classifies whitespace (incl. NBSP) correctly."""
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """_is_control classifies control characters correctly."""
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """_is_punctuation classifies punctuation characters correctly."""
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        """Soft-hyphen-only input tokenizes to nothing (slow and fast)."""
        UpperCamelCase : Optional[Any] = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            UpperCamelCase : List[str] = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Offset mapping of the fast tokenizer matches the expected spans
        for both cased and lower-cased configurations."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                UpperCamelCase : List[str] = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                UpperCamelCase : Tuple = tokenizer_r.encode_plus(
                    lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase , )
                UpperCamelCase : int = tokenizer_r.do_lower_case if hasattr(lowerCamelCase , "do_lower_case" ) else False
                UpperCamelCase : int = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        """Slow and fast tokenizers agree on Chinese-character handling, with
        and without the "##" continuation prefix."""
        UpperCamelCase : int = ["的", "人", "有"]
        UpperCamelCase : Any = "".join(lowerCamelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase : Union[str, Any] = True
                UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                UpperCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                UpperCamelCase : List[str] = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                UpperCamelCase : str = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
                UpperCamelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCamelCase , lowerCamelCase )
                self.assertListEqual(lowerCamelCase , lowerCamelCase )
                UpperCamelCase : Any = False
                UpperCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                UpperCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                UpperCamelCase : List[str] = tokenizer_r.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                UpperCamelCase : Any = tokenizer_p.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                UpperCamelCase : Tuple = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
                UpperCamelCase : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCamelCase : List[str] = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCamelCase )
                ]
                self.assertListEqual(lowerCamelCase , lowerCamelCase )
                self.assertListEqual(lowerCamelCase , lowerCamelCase )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        """build_inputs_with_special_tokens wraps ids as [CLS] A [SEP] (+ B [SEP])."""
        UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCamelCase : Optional[Any] = tokenizer.encode("你好" , add_special_tokens=lowerCamelCase )
        UpperCamelCase : Any = tokenizer.encode("你是谁" , add_special_tokens=lowerCamelCase )
        UpperCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
        UpperCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        """prepare_for_model and encode_plus must produce the same encoding."""
        UpperCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                UpperCamelCase : List[Any] = "你好,你是谁"
                UpperCamelCase : Union[str, Any] = tokenizer.tokenize(lowerCamelCase )
                UpperCamelCase : int = tokenizer.convert_tokens_to_ids(lowerCamelCase )
                UpperCamelCase : Optional[int] = tokenizer.convert_tokens_to_shape_ids(lowerCamelCase )
                UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase )
                UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
                    lowerCamelCase , lowerCamelCase , lowerCamelCase , add_special_tokens=lowerCamelCase )
                UpperCamelCase : List[Any] = tokenizer.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(lowerCamelCase , lowerCamelCase )
| 435 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def A__ ( ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = 10
UpperCamelCase : Optional[Any] = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
"answers": datasets.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
"id": datasets.Value("int64"),
})
UpperCamelCase : Union[str, Any] = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(A)),
} , features=A , )
return dataset
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : int = str(tmp_path_factory.mktemp("data") / "file.arrow")
dataset.map(cache_file_name=A)
return filename
# FILE_CONTENT + files
lowerCAmelCase_ = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "file.txt"
UpperCamelCase : List[str] = FILE_CONTENT
with open(A , "w") as f:
f.write(A)
return filename
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
import bza
UpperCamelCase : Optional[Any] = tmp_path_factory.mktemp("data") / "file.txt.bz2"
UpperCamelCase : Optional[int] = bytes(A , "utf-8")
with bza.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
'''simple docstring'''
import gzip
UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
UpperCamelCase : List[str] = bytes(A , "utf-8")
with gzip.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : str):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCamelCase : str = tmp_path_factory.mktemp("data") / "file.txt.lz4"
UpperCamelCase : Dict = bytes(A , "utf-8")
with lza.frame.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : Tuple):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "file.txt.7z"
with pyazr.SevenZipFile(A , "w") as archive:
archive.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : List[str] , A : Dict):
'''simple docstring'''
import tarfile
UpperCamelCase : Optional[Any] = tmp_path_factory.mktemp("data") / "file.txt.tar"
with tarfile.TarFile(A , "w") as f:
f.add(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Any):
'''simple docstring'''
import lzma
UpperCamelCase : int = tmp_path_factory.mktemp("data") / "file.txt.xz"
UpperCamelCase : Union[str, Any] = bytes(A , "utf-8")
with lzma.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Tuple , A : List[Any]):
'''simple docstring'''
import zipfile
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "file.txt.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[Any]):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCamelCase : List[str] = tmp_path_factory.mktemp("data") / "file.txt.zst"
UpperCamelCase : List[str] = bytes(A , "utf-8")
with zstd.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : List[Any]):
'''simple docstring'''
UpperCamelCase : List[str] = tmp_path_factory.mktemp("data") / "file.xml"
UpperCamelCase : Any = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>")
with open(A , "w") as f:
f.write(A)
return filename
lowerCAmelCase_ = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session")
def A__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def A__ ( A : List[Any]):
'''simple docstring'''
UpperCamelCase : Dict = datasets.Dataset.from_dict(A)
UpperCamelCase : Tuple = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
dataset.map(cache_file_name=A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
'''simple docstring'''
UpperCamelCase : str = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
with contextlib.closing(sqlitea.connect(A)) as con:
UpperCamelCase : str = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope="session")
def A__ ( A : List[Any]):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(A , "w" , newline="") as f:
UpperCamelCase : Any = csv.DictWriter(A , fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
with open(A , "w" , newline="") as f:
UpperCamelCase : Dict = csv.DictWriter(A , fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict , A : Any):
'''simple docstring'''
import bza
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
with open(A , "rb") as f:
UpperCamelCase : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any] , A : int , A : List[Any]):
'''simple docstring'''
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : Optional[Any] , A : Tuple):
'''simple docstring'''
UpperCamelCase : Tuple = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV")))
f.write(A , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV")))
return path
@pytest.fixture(scope="session")
def A__ ( A : str , A : int , A : int):
'''simple docstring'''
UpperCamelCase : int = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
UpperCamelCase : Union[str, Any] = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
})
with open(A , "wb") as f:
UpperCamelCase : Optional[int] = pq.ParquetWriter(A , schema=A)
UpperCamelCase : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(A))] for k in DATA[0]} , schema=A)
writer.write_table(A)
writer.close()
return path
@pytest.fixture(scope="session")
def A__ ( A : List[str]):
'''simple docstring'''
UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data") / "dataset.json")
UpperCamelCase : Any = {"data": DATA}
with open(A , "w") as f:
json.dump(A , A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Any):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset.json")
UpperCamelCase : int = {"data": DATA_DICT_OF_LISTS}
with open(A , "w") as f:
json.dump(A , A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
with open(A , "w") as f:
for item in DATA:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
with open(A , "w") as f:
for item in DATA:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
with open(A , "w") as f:
for item in DATA_312:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : str = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
with open(A , "w") as f:
for item in DATA_STR:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[Any] , A : Optional[Any]):
'''simple docstring'''
import gzip
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
with open(A , "rb") as orig_file:
with gzip.open(A , "wb") as zipped_file:
zipped_file.writelines(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Tuple , A : Optional[int]):
'''simple docstring'''
import gzip
UpperCamelCase : Optional[int] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
with open(A , "rb") as orig_file:
with gzip.open(A , "wb") as zipped_file:
zipped_file.writelines(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : List[str] , A : int):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[Any] , A : Tuple , A : List[str] , A : List[Any]):
'''simple docstring'''
UpperCamelCase : Tuple = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.join("nested" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , jsonl_path , jsonla_path ):
    """Zip two JSONL files under a ``main_dir/`` folder; return the archive path.

    NOTE(review): the original signature had three identically-named
    parameters (invalid Python); restored as factory + two source files.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonla_path, arcname=os.path.join("main_dir", os.path.basename(jsonla_path)))
    return path
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , jsonl_path , jsonla_path ):
    """Tar two JSONL files at the archive root; return the archive path.

    NOTE(review): the original signature had three identically-named
    parameters (invalid Python); restored as factory + two source files.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonla_path, arcname=os.path.basename(jsonla_path))
    return path
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , jsonl_path , _unused_a , _unused_b ):
    """Tar one JSONL file under a ``nested/`` folder; return the archive path.

    NOTE(review): the original (invalid) signature had four identically-named
    parameters while the body archives a single file; the last two parameters
    appear unused — confirm the intended fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.join("nested", os.path.basename(jsonl_path)))
    return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
    """Write a small plain-text dataset ("0".."3", one per line); return its path.

    `A` is pytest's ``tmp_path_factory`` fixture (obfuscated name kept).
    """
    data = ["0", "1", "2", "3"]
    path = str(A.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def A__ ( A : str):
    """Write a second plain-text dataset ("0".."3", one per line); return its path.

    `A` is pytest's ``tmp_path_factory`` fixture (obfuscated name kept).
    """
    data = ["0", "1", "2", "3"]
    path = str(A.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
    """Write a dataset with an unsupported ``.abc`` extension; return its Path.

    `A` is pytest's ``tmp_path_factory`` fixture (obfuscated name kept).
    Note: returns a ``pathlib.Path`` (not str), matching the original.
    """
    data = ["0", "1", "2", "3"]
    path = A.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , text_path , texta_path ):
    """Zip two text files at the archive root; return the archive path.

    NOTE(review): the original signature had three identically-named
    parameters (invalid Python); restored as factory + two source files.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(texta_path, arcname=os.path.basename(texta_path))
    return path
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , text_path , texta_path ):
    """Zip two text files under a ``main_dir/`` folder; return the archive path.

    NOTE(review): the original signature had three identically-named
    parameters (invalid Python); restored as factory + two source files.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(texta_path, arcname=os.path.join("main_dir", os.path.basename(texta_path)))
    return path
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , text_path , texta_path ):
    """Zip two files under unsupported ``.ext`` arcnames; return the archive path.

    NOTE(review): the original signature had three identically-named
    parameters (invalid Python); restored as factory + two source files.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(texta_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def A__ ( A : str):
    """Write a UTF-8 text file containing a U+2029 paragraph separator; return its path.

    `A` is pytest's ``tmp_path_factory`` fixture (obfuscated name kept).
    """
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(A.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def A__ ( ):
    """Location of the bundled RGB JPEG used by image feature tests."""
    parts = ("tests", "features", "data", "test_image_rgb.jpg")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def A__ ( ):
    """Location of the bundled 44.1 kHz WAV used by audio feature tests."""
    parts = ("tests", "features", "data", "test_audio_44100.wav")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def A__ ( tmp_path_factory , image_file ):
    """Zip the test image twice (second copy renamed ``...2.jpg``); return the path.

    NOTE(review): the original signature had two identically-named
    parameters (invalid Python); restored as factory + image file.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
    """Build a data directory with visible and hidden files/subdirs; return it.

    `A` is pytest's ``tmp_path_factory`` fixture (obfuscated name kept).
    """
    data_dir = A.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 435 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Round-trip and wiring tests for BlipaProcessor (image processor + tokenizer).

    NOTE(review): the original defined every method under a single shadowed
    name (`_lowerCamelCase`) while the bodies called `self.get_tokenizer()`,
    `self.get_image_processor()`, `self.prepare_image_inputs()` etc.; the
    conventional unittest/helper names are restored here so the class works.
    """

    def setUp( self ):
        # Persist a processor built from dummy components; tests reload it from disk.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        """Reload only the tokenizer from the saved processor directory."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        """Reload only the image processor from the saved processor directory."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        """Return a list holding one random RGB PIL image (channels-last)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ):
        # Extra kwargs passed at load time must reach both sub-components.
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )

    def test_image_processor( self ):
        # Image features from the processor must match the bare image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        # Text encodings from the processor must match the bare tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 76 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class UpperCAmelCase_ :
    """Sample this process's RSS from a background thread to record the peak.

    NOTE(review): the original stored readings in throwaway locals and gave
    all three methods one shadowed name while referencing
    ``self.peak_monitor`` — the attribute writes and conventional method
    names are restored here.
    """

    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor( self ):
        """Thread body: spin, tracking max RSS, until monitoring is switched off."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start( self ):
        """Launch the daemon monitoring thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()

    def stop( self ):
        """Stop monitoring, join the thread, and return the peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# Shared peak-RSS tracker for the start/end measurement helpers below.
# The original referenced an undefined name `PeakCPUMemory`; the tracker is
# instantiated from the class defined above and also exposed under the
# `cpu_peak_tracker` name the helpers use.
a_ = cpu_peak_tracker = UpperCAmelCase_()
def __UpperCAmelCase ( ):
    """Snapshot time, CPU RSS, and per-GPU allocated memory.

    Returns a dict with keys: "time" (epoch seconds), "cpu" (RSS bytes) and
    one entry per CUDA device index (allocated bytes).
    """
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def __UpperCAmelCase ( __UpperCamelCase ):
    """Diff current usage against a start snapshot from the helper above.

    Returns elapsed time in seconds plus CPU/GPU deltas and peaks in MiB.
    """
    start_measures = __UpperCamelCase
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (MiB)
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def __UpperCAmelCase ( measures , description ):
    """Pretty-print a measurement dict produced by the helpers above.

    NOTE(review): the original signature had two identically-named
    parameters (invalid Python); restored as (measures, description).
    """
    print(f"""{description}:""" )
    print(f"""- Time: {measures["time"]:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(f"""- GPU {i} allocated: {measures[str(i )]:.2f}MiB""" )
        peak = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
    print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 76 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( snake_case : List[str] ) -> List[str]:
    """Load a YAML config file into a flat ``argparse.Namespace``.

    Nested mappings are flattened into dot-separated attribute names
    (e.g. ``model.classification.name``).
    """
    print('''Loading config file...''' )

    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        # Recursively flatten nested mappings into {"a.b.c": value} pairs.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(snake_case , '''r''' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('''Error while loading config file: {}. Error message: {}'''.format(snake_case , str(exc ) ) )
    return config
def A_ ( task_name , orig_cfg_file ):
    """Build a MobileViTVaConfig for the given conversion task.

    NOTE(review): the original signature had two identically-named
    parameters (invalid Python) and dropped every setting into a throwaway
    local; the config population is restored here.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('''imagenet1k_''' ):
        config.num_labels = 1000
        if int(task_name.strip().split('''_''' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = '''imagenet-1k-id2label.json'''
    elif task_name.startswith('''imagenet21k_to_1k_''' ):
        config.num_labels = 21000
        if int(task_name.strip().split('''_''' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = '''imagenet-22k-id2label.json'''
    elif task_name.startswith('''ade20k_''' ):
        config.num_labels = 151
        config.image_size = 512
        filename = '''ade20k-id2label.json'''
        is_segmentation_model = True
    elif task_name.startswith('''voc_''' ):
        config.num_labels = 21
        config.image_size = 512
        filename = '''pascal-voc-id2label.json'''
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , '''model.classification.mitv2.width_multiplier''' , 1.0 )
    assert (
        getattr(orig_config , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , '''model.classification.activation.name''' , '''swish''' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , '''model.segmentation.output_stride''' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
    # id2label
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def A_ ( dct , old_key , new_key ):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    NOTE(review): the original signature had three identically-named
    parameters (invalid Python) and discarded the popped value.
    """
    val = dct.pop(old_key )
    dct[new_key] = val
def A_ ( state_dict , base_model=False ):
    """Map original MobileViTv2 checkpoint keys onto the HF naming scheme.

    Returns a list of ``(old_key, new_key)`` pairs. With ``base_model`` the
    ``mobilevitv2.`` prefix is omitted. NOTE(review): the original body lost
    every rewrite in a throwaway local; the ``k_new`` accumulation is restored.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        # strip the original "encoder." prefix
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('''.block.''' , '''.''' )
        if ".conv." in k:
            k_new = k_new.replace('''.conv.''' , '''.convolution.''' )
        if ".norm." in k:
            k_new = k_new.replace('''.norm.''' , '''.normalization.''' )
        if "conv_1." in k:
            k_new = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
        if ".red_1x1." in k:
            k_new = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            # number of transformer sub-layers per stage
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
            # the final global_rep index (j leaks from the loop above) is the layernorm
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
        if "classifier.1." in k:
            k_new = k_new.replace('''classifier.1.''' , '''classifier.''' )
        if "seg_head." in k:
            k_new = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('''.aspp_layer.''' , '''.''' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('''.aspp_pool.''' , '''.''' )
        rename_keys.append((k, k_new) )
    return rename_keys
def A_ ( snake_case : List[str] ) -> str:
'''simple docstring'''
__UpperCamelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(snake_case )
for k in keys_to_ignore:
state_dict.pop(snake_case , snake_case )
def A_ ( ):
    """Download and return the standard COCO cats test image as a PIL image."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def A_ ( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    """Convert an original MobileViTv2 checkpoint to HF format and save it.

    NOTE(review): the original signature had four identically-named
    parameters (invalid Python) and lost every intermediate in one local;
    the helper functions called below are defined elsewhere in this module.
    """
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    # load huggingface model
    if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('''imagenet''' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
        # expected_logits for base variant
        expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse the task/paths and run the conversion.
    # NOTE(review): the original assigned the parser and parsed args to
    # throwaway names while reading `parser`/`args`; restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 451 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowercase__ : int = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SCREAMING_SNAKE_CASE__ ( Pipeline ):
    """Zero-shot image classification pipeline (CLIP-style models).

    NOTE(review): the original decorated/derived from an undefined name and
    defined all four hook methods under one shadowed name; the Pipeline hook
    names (``_sanitize_parameters``/``preprocess``/``_forward``/
    ``postprocess``) that the base class dispatches to are restored here.
    """

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , '''vision''' )
        # accept both the PT and TF zero-shot image-classification model maps
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self , images , **kwargs ):
        """Classify image(s) against the given ``candidate_labels``."""
        return super().__call__(images , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        # Route user kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}

    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        # Encode the image and one hypothesis sentence per candidate label.
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs

    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess( self , model_outputs ):
        """Turn per-label logits into a score-sorted list of {score, label} dicts."""
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
| 451 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast tests for ``IFPipeline`` driven by the shared tester mixins.

    NOTE(review): the original defined all class attributes and all test
    methods under single shadowed names; the attribute/method names the
    mixins and unittest dispatch on are restored here.
    """

    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components( self ):
        # Delegate to the IF-specific mixin helper.
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_floataa( self ):
        # Requires a looser tolerance in half precision.
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def lowerCamelCase_ ( self : int ):
        """Release Python and CUDA memory between tests.

        NOTE(review): this calls ``super().tearDown()`` but is not itself
        named ``tearDown``, so unittest never invokes it automatically —
        confirm the intended method name.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Tuple = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Any = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : str = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""np""" , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""np""" , )
SCREAMING_SNAKE_CASE : str = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""np""" , )
SCREAMING_SNAKE_CASE : str = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def _start_torch_memory_measurement():
    """Reset CUDA allocator bookkeeping so each pipeline stage's peak memory
    (checked via `torch.cuda.max_memory_allocated`) is measured in isolation.

    fixed: the def had been renamed to `__A` by an automated pass while every
    call site in the test methods above uses
    `_start_torch_memory_measurement()`; the name is restored to match the
    callers.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 379 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint ids for the four Stable Diffusion releases being compared.
# NOTE(review): the renaming pass collapsed four distinct constants
# (presumably pipe1_model_id .. pipe4_model_id) onto the single name
# `__UpperCAmelCase`, so after import only the v1-4 id survives.
__UpperCAmelCase = """CompVis/stable-diffusion-v1-1"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-2"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-3"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-4"""
class UpperCamelCase__ ( DiffusionPipeline ):
    """Comparison pipeline: run the same prompt through the four Stable
    Diffusion v1.x checkpoints (v1-1 .. v1-4) and return one image per
    checkpoint for side-by-side comparison.

    NOTE(review): the original file was mangled by an automated renaming pass
    (duplicate parameter names — a SyntaxError in Python — and every method
    and constant collapsed onto a single name). Identifiers below are
    reconstructed from the diffusers "stable_diffusion_comparison" community
    pipeline that this file mirrors.
    """

    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ):
        """Load checkpoints v1-1..v1-3 from the Hub, assemble v1-4 from the
        supplied components, and register all four sub-pipelines."""
        # fixed: was `super()._init_()`, which raises AttributeError at runtime.
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-1""" )
        self.pipe2 = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-2""" )
        self.pipe3 = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-3""" )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )

    @property
    def layers( self ) -> Dict[str, Any]:
        """All public config entries mapped to their registered modules."""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("""_""" )}

    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention computation to save memory.

        With "auto", half the attention head count is used as slice size.
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        """Disable attention slicing (attention is computed in one step)."""
        self.enable_attention_slicing(None )

    @torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """Generate with the v1-1 checkpoint (delegates to self.pipe1)."""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """Generate with the v1-2 checkpoint (delegates to self.pipe2)."""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """Generate with the v1-3 checkpoint (delegates to self.pipe3)."""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """Generate with the v1-4 checkpoint (delegates to self.pipe4)."""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """Run the prompt through all four checkpoints and return their first
        images wrapped in a single StableDiffusionPipelineOutput.

        Raises:
            ValueError: if `height` or `width` is not divisible by 8.
        """
        device = """cuda""" if torch.cuda.is_available() else """cpu"""
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 379 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __snake_case( table ) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix.

    Returns (lower, upper) with `lower` unit lower-triangular and `upper`
    upper-triangular such that ``lower @ upper == table``.

    fixed: an automated renaming pass had collapsed the parameter, both loop
    bounds and the loop variables onto `_lowerCAmelCase`, leaving `rows`,
    `columns` and `table` (all still referenced by the error-message f-string)
    undefined and the loop ranges nonsensical; the standard Doolittle
    recurrences are restored.

    Raises:
        ValueError: if `table` is not square.
        ArithmeticError: if a zero pivot is encountered (no LU decomposition
            exists without row pivoting).
    """
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper


# Public alias: `from module import *` skips underscore-prefixed names, so
# expose the decomposition under a conventional name as well.
lu_decomposition = __snake_case

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 301 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def get_imdb_top_aaa_movies( _lowerCAmelCase = "" ) -> dict[str, float]:
    """Scrape the IMDb Top-250 chart and map each movie title to its rating.

    fixed: the def had been renamed to `__snake_case` by an automated pass
    (and was then shadowed by the second `__snake_case` def below), while the
    CSV writer calls `get_imdb_top_aaa_movies()`; the name is restored to
    match that call site.

    Args:
        _lowerCAmelCase: optional chart URL override; falls back to the
            Top-250 chart when empty/falsy.
    """
    chart_url = _lowerCAmelCase or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
    page = BeautifulSoup(requests.get(chart_url ).text , """html.parser""" )
    title_cells = page.find_all("""td""" , attrs="""titleColumn""" )
    rating_cells = page.find_all("""td""" , class_="""ratingColumn imdbRating""" )
    # Pair the title column with the rating column positionally.
    return {
        cell.a.text: float(score.strong.text )
        for cell, score in zip(title_cells , rating_cells )
    }
def write_movies( _lowerCAmelCase = "IMDb_Top_250_Movies.csv" ) -> None:
    """Fetch the Top-250 ratings and write them to a two-column CSV file.

    fixed: the def had been renamed to `__snake_case` by an automated pass
    while the `__main__` guard below calls `write_movies()`; the name is
    restored to match that call site.

    Args:
        _lowerCAmelCase: path of the CSV file to create.
    """
    movies = get_imdb_top_aaa_movies()
    with open(_lowerCAmelCase , """w""" , newline="""""" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["""Movie title""", """IMDb rating"""] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    write_movies()
| 301 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config_name) pairs whose prepared files are mirrored on the
# HF GCP bucket; consumed by the parameterization helper below.
# NOTE(review): the renaming pass collapsed the original constant name
# (referenced elsewhere in this module as `DATASETS_ON_HF_GCP`) to `_a`.
_a : Dict = [
    {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
    {"""dataset""": """snli""", """config_name""": """plain_text"""},
    {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
    {"""dataset""": """wiki40b""", """config_name""": """en"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
    {"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ) -> list:
    """Build `parameterized.named_parameters` test-case dicts from the
    module-level dataset table `_a`.

    fixed: the def/parameter had been renamed (`_lowerCAmelCase`/`lowercase`)
    by an automated pass while the class decorator below calls
    `list_datasets_on_hf_gcp_parameters(with_config=...)` and the body read
    the undefined name `with_config`; names are restored to match the call
    site. The body's undefined `DATASETS_ON_HF_GCP` survives in this module
    under the collapsed name `_a`.

    Args:
        with_config: when True, emit one case per (dataset, config) pair;
            otherwise one case per distinct dataset.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in _a
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in _a}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class _UpperCAmelCase ( TestCase ):
    """For every (dataset, config) pair mirrored on HF GCP, check that the
    dataset-info file can be downloaded from the bucket.

    fixed: the decorator argument and the base class were the undefined name
    `lowerCAmelCase_` (an automated renaming artifact); `True` and
    `unittest.TestCase` restore the intent.
    """

    # NOTE(review): both class attributes were collapsed onto the single name
    # `a` (presumably `dataset` / `config_name` defaults); their original
    # `str`/`Optional[int]` annotations referenced `Optional`, which this
    # module never imports, so the annotations are dropped to keep the module
    # importable.
    a = None
    a = None

    def lowerCamelCase__ ( self , dataset , config_name ):
        """Download dataset_infos for (dataset, config_name) from HF GCP and
        assert that the cached file exists.

        fixed: the original signature declared the same parameter name twice
        (a SyntaxError in Python); restored to the names the `parameterized`
        decorator supplies.
        """
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            # Build the GCS URL for this builder's dataset-info file.
            resource_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(resource_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def _lowerCAmelCase ( tmp_path_factory ) -> Dict:
    """Integration test: prepare `wikipedia/20220301.frr` from the HF GCP
    mirror (bypassing the apache-beam download path) and load it as a Dataset.

    fixed: the parameter had been renamed to `lowercase` by an automated pass
    while the body already used the pytest fixture name `tmp_path_factory`;
    the fixture name is restored so pytest can inject it. The collapsed
    `__lowerCAmelCase = None` line is restored to disabling the builder's
    `_download_and_prepare` hook.
    """
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def _lowerCAmelCase ( tmp_path ) -> Union[str, Any]:
    """Integration test: stream `wikipedia/20220301.frr` from HF GCP and check
    the returned IterableDatasetDict yields records.

    NOTE(review): after the renaming pass the parameter was `lowercase` and
    the body used the undefined `UpperCAmelCase_`; `tmp_path` (the pytest
    fixture) is the presumed original cache directory — confirm against
    upstream history. `dataset=True` restores `import_main_class`'s
    collapsed keyword argument.
    """
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""] , IterableDataset )
    assert next(iter(ds["""train"""] ) )
| 689 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( ProcessorMixin ):
    """OwlViT processor: wraps an OwlViT image processor and a CLIP
    tokenizer (slow or fast) into a single processor.

    NOTE(review): the original was mangled by an automated renaming pass
    (duplicate parameter names — a SyntaxError — and every helper method and
    class attribute collapsed onto one name). Identifiers below are restored
    to the transformers `OwlViTProcessor` contract.
    """

    # ProcessorMixin hooks: these exact attribute names are required so the
    # mixin can register/save/load the two sub-components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Store the two sub-processors; `feature_extractor` is accepted as a
        deprecated kwarg alias for `image_processor`.

        Raises:
            ValueError: if either sub-processor is missing.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        """Tokenize `text` (a string, a list of strings, or a nested list of
        per-image query strings padded to the largest query count) and/or
        preprocess `images`/`query_images`, returning a combined
        `BatchEncoding`.

        Raises:
            ValueError: if none of text/query_images/images is given, or the
                requested tensor framework is unavailable.
            TypeError: if `text` has an unsupported structure.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            # Concatenate the per-sample encodings with the requested backend.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def post_process( self , *args , **kwargs ):
        """Forward to OwlViTImageProcessor.post_process."""
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        """Forward to OwlViTImageProcessor.post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        """Forward to OwlViTImageProcessor.post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 675 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) -> bool:
    """Return True when every tensor in the given list has the same shape.

    fixed: the body referenced the undefined name `tensor_list` instead of
    the (renamed) parameter, and the `Optional[int]` return annotation used a
    name this module never imports; both corrected.

    Args:
        SCREAMING_SNAKE_CASE_: list of objects exposing a `.shape` attribute.
    """
    shapes = [tensor.shape for tensor in SCREAMING_SNAKE_CASE_]
    # Vacuously True for zero or one tensor.
    return all(shape == shapes[0] for shape in shapes[1:] )
class _UpperCAmelCase ( a_ , a_ , a_ , unittest.TestCase ):
    '''Fast-test configuration for StableDiffusionLatentUpscalePipeline.

    NOTE(review): the three mixin bases were all renamed to `a_`, which is
    undefined here (per the import block above they were
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin and
    PipelineTesterMixin), so the class cannot be created as written.
    Likewise every attribute below was collapsed onto `__snake_case`, so at
    class-creation time later assignments overwrite earlier ones.
    '''

    # pipeline class under test
    __snake_case = StableDiffusionLatentUpscalePipeline
    # call params (height/width/embeds are not supported by this pipeline)
    __snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        """height""",
        """width""",
        """cross_attention_kwargs""",
        """negative_prompt_embeds""",
        """prompt_embeds""",
    }
    # optional params, minus the unsupported num_images_per_prompt
    __snake_case = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
    __snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __snake_case = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __snake_case = frozenset([] )
    __snake_case = True
@property
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Dict = 1
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : Optional[int] = (16, 16)
_lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
    def a__ ( self ) -> Any:
        '''Build the dummy model components (unet/vae/scheduler/text encoder/
        tokenizer) used by the fast tests.

        NOTE(review): several keyword values below pass `_lowercase`, a name
        not defined in this scope (an automated renaming pass collapsed
        distinct values onto it), so this method raises NameError as written.
        Presumed originals: `norm_num_groups=None`, `mid_block_type=None`, a
        boolean for `only_cross_attention`, and the `CLIPTextConfig` instance
        for `CLIPTextModel(...)` — confirm against upstream diffusers before
        fixing. Every assignment also reuses the single collapsed local
        `_lowerCamelCase`, so only the last value is reachable, and the
        returned dict references names (`model`, `vae`, `scheduler`,
        `text_encoder`, `tokenizer`, `components`) that no longer exist.
        '''
        torch.manual_seed(0 )
        _lowerCamelCase : Any = UNetaDConditionModel(
            act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_lowercase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
            '''KDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            ) , in_channels=8 , mid_block_type=_lowercase , only_cross_attention=_lowercase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
        _lowerCamelCase : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        _lowerCamelCase : Optional[Any] = EulerDiscreteScheduler(prediction_type='''sample''' )
        _lowerCamelCase : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
        _lowerCamelCase : Optional[Any] = CLIPTextModel(_lowercase )
        _lowerCamelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _lowerCamelCase : List[str] = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
def a__ ( self , _lowercase , _lowercase=0 ) -> List[str]:
if str(_lowercase ).startswith('''mps''' ):
_lowerCamelCase : List[Any] = torch.manual_seed(_lowercase )
else:
_lowerCamelCase : Tuple = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
_lowerCamelCase : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : int = '''cpu'''
_lowerCamelCase : str = self.get_dummy_components()
_lowerCamelCase : Union[str, Any] = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCamelCase : Dict = self.get_dummy_inputs(_lowercase )
_lowerCamelCase : List[str] = pipe(**_lowercase ).images
_lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
_lowerCamelCase : List[str] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
_lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowercase , 1E-3 )
def a__ ( self ) -> Optional[int]:
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def a__ ( self ) -> List[str]:
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def a__ ( self ) -> int:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def a__ ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def a__ ( self ) -> Optional[int]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def a__ ( self ) -> int:
super().test_save_load_local(expected_max_difference=3E-3 )
def a__ ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def a__ ( self ) -> List[Any]:
_lowerCamelCase : Optional[Any] = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
_lowerCamelCase : Optional[Any] = self.get_dummy_components()
_lowerCamelCase : str = self.pipeline_class(**_lowercase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCamelCase : Dict = self.get_dummy_inputs(_lowercase )
_lowerCamelCase : Any = 2
_lowerCamelCase : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
_lowerCamelCase : Optional[int] = getattr(_lowercase , scheduler_enum.name )
_lowerCamelCase : str = scheduler_cls.from_config(pipe.scheduler.config )
_lowerCamelCase : Optional[int] = pipe(**_lowercase )[0]
outputs.append(_lowercase )
assert check_same_shape(_lowercase )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for the latent-upscaler pipeline.

    The scrambled original collapsed the prompt and generator variables into a
    single name and used the nonexistent attribute ``torch.floataa``; the
    canonical fp16 variants are restored here.
    """

    def tearDown(self):
        # Free GPU memory between tests so later tests don't OOM.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        """Generate latents with SD v1-4, upscale them, compare to a reference."""
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''', torch_dtype=torch.float16)
        pipe.to('''cuda''')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16)
        upscaler.to('''cuda''')
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt, generator=generator, output_type='''latent''').images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='''np''', ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''')
        assert np.abs((expected_image - image).mean()) < 5E-2

    def test_latent_upscaler_fp16_image(self):
        """Upscale a fixed 512px image and compare to the recorded 1024px output."""
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16)
        upscaler.to('''cuda''')
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''')
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='''np''', ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''')
        assert np.abs((expected_image - upscaled_image).max()) < 5E-2
| 707 | """simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution transformed by ``loc + scale * x``.

    The scrambled original declared four parameters all named ``_lowercase``
    (a SyntaxError) and subclassed the undefined name ``a_``; the class is
    restored to its canonical form (``TransformedDistribution`` is already
    imported at the top of this module, and ``AffineTransformed`` is referenced
    by name in ``DistributionOutput.distribution`` below).
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0) -> List[Any]:
        # Default to the identity transform when loc/scale are not given.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution: scale * base_mean + loc."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution: scale**2 * base_variance."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects hidden features to the raw (unconstrained) distribution args.

    One ``nn.Linear`` is created per entry of ``args_dim``; ``domain_map`` then
    constrains each projected tensor to its valid domain. The scrambled
    original declared duplicate parameter names (a SyntaxError) and named the
    forward pass ``a__``, which ``nn.Module.__call__`` would never invoke; both
    are restored (``ParameterProjection`` is referenced by name in
    ``DistributionOutput.get_parameter_projection``).
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x) -> Tuple[torch.Tensor]:
        # Unconstrained projections, one per distribution argument.
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module``.

    Lets a plain function (e.g. a ``domain_map``) be used where a module is
    expected. The scrambled original named the forward pass ``a__`` and gave
    it duplicate parameter names (a SyntaxError); restored to canonical form
    (``LambdaLayer`` is referenced by name in
    ``DistributionOutput.get_parameter_projection``).
    """

    def __init__(self, function) -> None:
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        # Delegate directly to the wrapped callable.
        return self.function(x, *args)
class DistributionOutput:
    """Base class to construct a distribution from model-projected parameters.

    Subclasses set ``distribution_class`` and ``args_dim`` and implement
    ``domain_map``. The scrambled original used ``42`` placeholders for the
    class annotations, duplicate parameter names (a SyntaxError), and named
    every method ``a__`` so only the last definition survived; the canonical
    structure is restored.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        # `dim` multiplies every per-argument dimension for multivariate output.
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        """Instantiate the underlying distribution (Independent when dim > 1)."""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Build the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of a single event: scalar for dim == 1, else (dim,)."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (len of event_shape)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args) -> int:
        """Constrain raw projections to each argument's valid domain (abstract)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x) -> torch.Tensor:
        # Smooth, positive mapping: (x + sqrt(x^2 + 4)) / 2; behaves like
        # softplus but is cheaper and maps 0 -> 1.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output head.

    The scrambled original subclassed the undefined name ``a_`` and declared
    three classmethod parameters all named ``_lowercase`` (a SyntaxError);
    canonical names are restored.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        """Map raw projections to valid (df, loc, scale) parameters."""
        # scale must be strictly positive; clamp to the dtype's epsilon.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        # df > 2 guarantees a finite variance.
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) distribution output head.

    The scrambled original subclassed the undefined name ``a_`` and declared
    duplicate classmethod parameters (a SyntaxError); canonical names are
    restored.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        """Map raw projections to valid (loc, scale) parameters."""
        # scale must be strictly positive; clamp to the dtype's epsilon.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output head (integer-valued targets).

    The scrambled original subclassed the undefined name ``a_``, declared
    duplicate classmethod parameters (a SyntaxError), and unpacked
    ``distr_args`` into two variables of the same name; canonical names are
    restored.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        """Map raw projections to valid (total_count, logits) parameters."""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Scale via the parameters, not an affine transform, to keep integer support."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 558 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Builds tiny Flaubert configs/inputs and runs per-head shape checks.

    The scrambled original declared many methods with several parameters all
    named ``A_`` (a SyntaxError) and hid the class under ``__snake_case``;
    the test class below instantiates ``TFFlaubertModelTester`` by name, so the
    canonical class, method, and parameter names are restored.
    """

    def __init__(self, parent, ):
        self.parent = parent
        # Tiny dimensions keep each check fast while exercising every code path.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = '''last'''
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """Create a config plus random ids/masks/labels for every head."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # NOTE(review): the scrambled source used the nonexistent `tf.floataa`;
        # tf.float32 is the canonical dtype here.
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """Base model accepts dict and list inputs; check hidden-state shape."""
        model = TFFlaubertModel(config=config)
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """LM head: logits shape is (batch, seq, vocab)."""
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """QA head: start/end logits shape is (batch, seq)."""
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """Sequence classification head: logits shape is (batch, num_classes)."""
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """Token classification head: logits shape is (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """Multiple choice head: inputs tiled per choice; logits (batch, choices)."""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt the full fixture tuple to the common-test (config, inputs) pair."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the TF Flaubert model family.

    The scrambled original declared ``is_pipeline_test_to_skip`` with five
    parameters all named ``A_`` (a SyntaxError) and passed the undefined
    ``A_`` as ``config_class``; canonical names are restored.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        """Skip QA pipeline tests with slow tokenizers (known to fail)."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published small Flaubert checkpoint.

    The scrambled original used the nonexistent dtypes ``tf.intaa`` /
    ``tf.floataa``; ``tf.int32`` / ``tf.float32`` are restored.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''')
        input_ids = tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 5_12))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 100 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

# Names of the vocabulary files bundled with a saved tokenizer. The scrambled
# original assigned all four module constants to the same name `snake_case_`,
# while the tokenizer class below references `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and
# `logger`; the canonical names are restored.
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

# Download URLs for each published RoBERTa checkpoint's vocabulary assets.
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
        """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
        ),
    },
    """merges_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
        """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
        ),
    },
    """tokenizer_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
        """roberta-base-openai-detector""": (
            """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
        ),
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum input length (in tokens) supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """roberta-base""": 512,
    """roberta-large""": 512,
    """roberta-large-mnli""": 512,
    """distilroberta-base""": 512,
    """roberta-base-openai-detector""": 512,
    """roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer.

    The scrambled original declared ``__init__`` with every parameter named
    ``__UpperCAmelCase`` (a SyntaxError), subclassed the undefined name
    ``_lowercase`` (``PreTrainedTokenizerFast`` is imported above), and applied
    ``@mask_token.setter`` to a property that was never defined under that
    name; the canonical class is restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        # Rebuild the pre-tokenizer if the stored add_prefix_space disagrees
        # with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''])
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''])
            changes_to_apply = False
            if state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''', trim_offsets) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('''type'''))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True so "<mask>" absorbs the preceding space, matching the
        # slow tokenizer's behavior.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Wrap sequence(s) as <s> A </s> (</s> B </s> for pairs)."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """RoBERTa does not use token types: return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
| 507 | 0 |
import requests
from bsa import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by N' link text for a Google Scholar lookup.

    The ``__main__`` block below calls ``get_citation``, but the scrambled
    original defined this function as ``__A`` (a NameError at runtime); the
    referenced name is restored.

    Args:
        base_url: Scholar lookup endpoint, e.g. the scholar_lookup URL.
        params: query parameters identifying the paper (title, journal, ...).

    Returns:
        The text of the third anchor in the result footer -- the citation link.
        NOTE(review): the module imports BeautifulSoup from `bsa`, which looks
        like a typo for the `bs4` package -- confirm and fix the import line.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    result_div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = result_div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
    # Demo: fetch the citation count of a known paper. The scrambled original
    # bound this dict to `__A` but passed the undefined name `params` to the
    # call below; the dict is renamed to match its use.
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 62 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__A )
# standard deviation of the initial noise distribution
_A = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_A = 4
# running values
_A = []
def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int:
_A = num_inference_steps
_A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_A = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_A = torch.sin(steps * math.pi / 2 ) ** 2
_A = (1.0 - self.betas**2) ** 0.5
_A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_A = timesteps.to(__A )
_A = []
def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
_A = (self.timesteps == timestep).nonzero().item()
_A = timestep_index + 1
_A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__A )
if len(self.ets ) == 1:
_A = self.ets[-1]
elif len(self.ets ) == 2:
_A = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_A = self._get_prev_sample(__A , __A , __A , __A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor:
return sample
def _get_prev_sample( self: List[str] , sample: Optional[Any] , timestep_index: Optional[Any] , prev_timestep_index: Any , ets: List[Any] ) -> List[Any]:
    """Compute the previous (less noisy) sample from the multistep epsilon `ets`.

    NOTE(review): name restored from the obfuscated ``__A`` — ``step`` calls
    ``self._get_prev_sample`` — and the duplicated parameter names (a SyntaxError)
    are recovered from the names the body reads; argument order assumed to match
    the call in ``step`` — TODO confirm.
    """
    alpha = self.alphas[timestep_index]
    sigma = self.betas[timestep_index]
    next_alpha = self.alphas[prev_timestep_index]
    next_sigma = self.betas[prev_timestep_index]
    # predicted original sample, guarded against division by (near-)zero alpha
    pred = (sample - sigma * ets) / max(alpha , 1e-8 )
    prev_sample = next_alpha * pred + ets * next_sigma
    return prev_sample
def __len__( self: List[str] ) -> int:
    # The scheduler's "length" is the configured number of training timesteps.
    return self.config.num_train_timesteps
| 62 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements below bind the same obfuscated name ``__a`` — the
# second re-binds the first (presumably a module logger and a pretrained-config
# archive map were obfuscated to one name; verify against the original file).
__a : str = logging.get_logger(__name__)

# Canonical checkpoint name -> URL of its hosted config.json.
__a : Optional[int] = {
    """microsoft/swinv2-tiny-patch4-window8-256""": (
        """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
    ),
}
class __UpperCAmelCase ( PretrainedConfig ):
    """Configuration for a Swin Transformer v2 model.

    NOTE(review): the original base class ``snake_case__`` is undefined in this
    file; ``PretrainedConfig`` is imported at the top and matches the
    ``**kwargs``/attribute-map usage below. The ``__init__`` signature repeated a
    single obfuscated parameter name (a SyntaxError) and discarded every value;
    parameter and attribute names are restored from the names the body reads.
    ``model_type``/``attribute_map`` follow the PretrainedConfig convention —
    confirm against the original file.
    """

    model_type = """swinv2"""
    # Map conventional transformer attribute names onto the Swin-specific ones.
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ) -> None:
        """Store the image/patch/backbone hyper-parameters on the config."""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 606 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __UpperCAmelCase ( ProcessorMixin ):
    """Processor wrapping a SAM image processor: forwards images to the image
    processor and normalizes prompt points / labels / boxes to its target size.

    NOTE(review): the original base class ``snake_case__`` is undefined in this
    file; ``ProcessorMixin`` is imported at the top and matches the
    attributes-style class variables. All methods were obfuscated to one
    duplicated name with duplicated parameter names (SyntaxErrors) while the
    call sites use the real private names; names are restored from those call
    sites and from the names each body reads.
    """

    # ProcessorMixin bookkeeping — names follow the mixin convention; confirm.
    attributes = ["""image_processor"""]
    image_processor_class = """SamImageProcessor"""

    def __init__( self , image_processor ) -> None:
        """Store the image processor and derive padding / target-size settings."""
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        # sentinel appended when padding ragged point/label arrays
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors = None , **kwargs , ) -> BatchEncoding:
        """Preprocess images and normalize any prompt points/labels/boxes."""
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes , "numpy" ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor

    def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , ) -> "BatchEncoding":
        """Rescale prompts to the processor's target size and convert to tensors."""
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                # fewer sizes than prompt groups: reuse the first original size
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_size )
                    for point, original_size in zip(input_points , original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points , input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
                    for box, original_size in zip(input_boxes , original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor

    def _pad_points_and_labels( self , input_points , input_labels ):
        """Pad ragged point arrays (and their labels) with the pad sentinel so
        they can be stacked into one array."""
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates( self , target_size , coords , original_size , is_bounding_box=False ) -> np.ndarray:
        """Scale (x, y) coordinates from `original_size` to the preprocessed size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            # boxes are (x1, y1, x2, y2): treat as two points per box
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords

    def _check_and_preprocess_points( self , input_points=None , input_labels=None , input_boxes=None , ):
        """Validate prompt structure and convert nested lists to numpy arrays."""
        if input_points is not None:
            if hasattr(input_points , "numpy" ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError("Input points must be a list of list of floating points." )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , "numpy" ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError("Input labels must be a list of list integers." )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , "numpy" ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            # `np.floataa` does not exist; float32 is the conventional dtype here.
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes

    @property
    def model_input_names( self ):
        """Deduplicated model input names of the wrapped image processor."""
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )

    def post_process_masks( self , *args , **kwargs ):
        """Delegate mask post-processing to the wrapped image processor."""
        return self.image_processor.post_process_masks(*args , **kwargs )
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 606 | 1 |
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    # Build an image-variation unCLIP pipeline by reusing the components of a
    # pretrained text-to-image unCLIP pipeline plus a CLIP image encoder.
    parser = argparse.ArgumentParser()

    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")

    parser.add_argument(
        """--txt2img_unclip""",
        default="""kakaobrain/karlo-v1-alpha""",
        type=str,
        required=False,
        help="""The pretrained txt2img unclip.""",
    )

    args = parser.parse_args()

    # BUGFIX: argparse exposes `--txt2img_unclip` as `args.txt2img_unclip`;
    # the original read the non-existent attribute `args.txtaimg_unclip`.
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")

    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
| 520 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors excluding n).

    NOTE(review): this function was defined under an obfuscated name but is
    called as ``sum_of_divisors`` below, so the intended name is restored.

    >>> sum_of_divisors(220)
    284
    """
    total = 0
    # Walk divisor pairs (i, n // i) up to sqrt(n); the integer comparison
    # `i == partner` replaces the fragile float test `i != sqrt(n)`.
    for i in range(1, int(sqrt(n)) + 1):
        if n % i == 0:
            partner = n // i
            total += i if i == partner else i + partner
    # The loop counted n itself (as the partner of 1); subtract it back out.
    return total - n
def solution(limit: int = 1_0000) -> int:
    """Return the sum of all amicable numbers below ``limit`` (Project Euler 21).

    NOTE(review): the original was defined under an obfuscated name (while the
    entry point calls ``solution``) and called an undefined ``sum_of_divisors``;
    a private helper is bundled here so the function is self-contained.
    """

    def _proper_divisor_sum(num: int) -> int:
        # Sum of proper divisors via paired divisors up to sqrt(num).
        total = 0
        for i in range(1, int(sqrt(num)) + 1):
            if num % i == 0:
                partner = num // i
                total += i if i == partner else i + partner
        return total - num

    # i is amicable iff d(d(i)) == i and d(i) != i (perfect numbers excluded).
    return sum(
        i
        for i in range(1, limit)
        if _proper_divisor_sum(i) != i and _proper_divisor_sum(_proper_divisor_sum(i)) == i
    )
if __name__ == "__main__":
    # Read a limit from stdin and print the sum of amicable numbers below it.
    # NOTE(review): a function named `solution` must be defined above; `input()`
    # already returns str, so the `str()` wrapper is redundant (kept as-is).
    print(solution(int(str(input()).strip())))
| 520 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__A : Any = logging.get_logger(__name__)
@dataclass
class __snake_case :
    """Quantization settings container (a bitsandbytes-style config).

    NOTE(review): heavily obfuscated and non-functional as written —
    ``__init__`` repeats the parameter name ``lowerCamelCase`` (a SyntaxError),
    assignments discard values into throwaway locals instead of ``self``
    attributes, every helper method shares the single name ``__lowercase`` (so
    only the last definition survives), and the ``_lowercase``/``load_in_abit``
    names referenced in the bodies are undefined. The ``@dataclass`` decorator
    is also unusual on a class with a hand-written ``__init__``. Code is left
    byte-identical; only documentation was added.
    """

    def __init__( self : List[str] , lowerCamelCase : Optional[int]=False , lowerCamelCase : Tuple=False , lowerCamelCase : str=6.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : List[Any]=False , lowerCamelCase : List[Any]=None , lowerCamelCase : str="fp4" , lowerCamelCase : Dict=False , **lowerCamelCase : Dict , ) -> Tuple:
        # NOTE(review): both lines below read `load_in_abit` — presumably the 8-bit
        # and 4-bit flags were collapsed by the obfuscation; verify against callers.
        lowerCAmelCase_ : int = load_in_abit
        lowerCAmelCase_ : Optional[Any] = load_in_abit
        lowerCAmelCase_ : Any = llm_inta_threshold
        lowerCAmelCase_ : Optional[int] = llm_inta_skip_modules
        lowerCAmelCase_ : str = llm_inta_enable_fpaa_cpu_offload
        lowerCAmelCase_ : int = llm_inta_has_fpaa_weight
        lowerCAmelCase_ : Optional[int] = bnb_abit_quant_type
        lowerCAmelCase_ : int = bnb_abit_use_double_quant
        if bnb_abit_compute_dtype is None:
            # default compute dtype when none is supplied
            lowerCAmelCase_ : List[Any] = torch.floataa
        elif isinstance(_lowercase , _lowercase ):
            # a string dtype name is resolved via getattr(torch, name)
            lowerCAmelCase_ : Dict = getattr(_lowercase , _lowercase )
        elif isinstance(_lowercase , torch.dtype ):
            lowerCAmelCase_ : str = bnb_abit_compute_dtype
        else:
            raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
        self.post_init()

    def __lowercase ( self : Optional[int] ) -> Dict:
        # Validate the types of all quantization settings; raise on mismatch.
        if not isinstance(self.llm_inta_threshold , _lowercase ):
            raise ValueError("""llm_int8_threshold must be a float""" )
        if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _lowercase ):
            raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
        if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _lowercase ):
            raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
        if not isinstance(self.llm_inta_has_fpaa_weight , _lowercase ):
            raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
        if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
            raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
        if not isinstance(self.bnb_abit_quant_type , _lowercase ):
            raise ValueError("""bnb_4bit_quant_type must be a string""" )
        if not isinstance(self.bnb_abit_use_double_quant , _lowercase ):
            raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
        if self.load_in_abit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
            """0.39.0""" ):
            raise ValueError(
                """4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )

    def __lowercase ( self : List[str] ) -> str:
        # True when any quantization mode is enabled.
        return self.load_in_abit or self.load_in_abit

    def __lowercase ( self : List[str] ) -> Optional[int]:
        # Return the quantization method identifier, or None when disabled.
        if self.load_in_abit:
            return "llm_int8"
        elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def __lowercase ( cls : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , **lowerCamelCase : List[Any] ) -> List[str]:
        # Alternate constructor: build from a dict, consuming matching kwargs and
        # optionally returning the unused ones.
        lowerCAmelCase_ : List[str] = cls(**_lowercase )
        lowerCAmelCase_ : Optional[Any] = []
        for key, value in kwargs.items():
            if hasattr(_lowercase , _lowercase ):
                setattr(_lowercase , _lowercase , _lowercase )
                to_remove.append(_lowercase )
        for key in to_remove:
            kwargs.pop(_lowercase , _lowercase )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def __lowercase ( self : Optional[Any] , lowerCamelCase : Union[str, os.PathLike] ) -> Dict:
        # Serialize the config as pretty-printed JSON to the given path.
        with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
            lowerCAmelCase_ : Optional[int] = self.to_dict()
            lowerCAmelCase_ : Tuple = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n"""
            writer.write(_lowercase )

    def __lowercase ( self : int ) -> List[str]:
        # Dict representation with the compute dtype rendered as its short name.
        lowerCAmelCase_ : List[str] = copy.deepcopy(self.__dict__ )
        lowerCAmelCase_ : str = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
        return output

    def __repr__( self : List[str] ) -> str:
        return F'{self.__class__.__name__} {self.to_json_string()}'

    def __lowercase ( self : Tuple , lowerCamelCase : bool = True ) -> Dict:
        # JSON string of either the diff against defaults or the full dict.
        if use_diff is True:
            lowerCAmelCase_ : List[Any] = self.to_diff_dict()
        else:
            lowerCAmelCase_ : List[str] = self.to_dict()
        return json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + "\n"

    def __lowercase ( self : int ) -> Tuple:
        # Keep only the values that differ from a default-constructed config.
        lowerCAmelCase_ : List[Any] = self.to_dict()
        # get the default config dict
        lowerCAmelCase_ : str = BitsAndBytesConfig().to_dict()
        lowerCAmelCase_ : Dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                lowerCAmelCase_ : Union[str, Any] = value
        return serializable_config_dict
| 275 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( TestCase ):
    """Tests for ``FeaturesManager.determine_framework``.

    NOTE(review): the original base class ``lowercase_`` is undefined in this
    file; ``TestCase`` is imported above and matches the
    ``assertEqual``/``assertRaises`` API used below. All methods were obfuscated
    to one duplicated name (so only the last survived); helper names are
    restored from the ``self._setup_*`` call sites and the test names follow the
    comments — confirm against the original file.
    """

    def setUp(self) -> None:
        # Attribute names restored from their reads in the test methods below.
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir) -> None:
        """Save a small PyTorch checkpoint into `save_dir`."""
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )

    def _setup_tf_ckpt(self, save_dir) -> None:
        """Save a small TensorFlow checkpoint (converted from PT) into `save_dir`."""
        # from_pt=True assumed (converting the PT weights) — confirm.
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )

    def test_framework_provided(self) -> None:
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )

    def test_checkpoint_provided(self) -> None:
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            # expected exception type assumed (directory without weights) — confirm
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )

    def test_from_environment(self) -> None:
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_torch_available" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-module import structure: maps submodule name to its exported public names.
# Optional backends (tokenizers / torch / tf / flax) are probed so unavailable
# extras are simply omitted from the structure.
# BUGFIX: stray table residue ("| 701 |") fused onto the final line was removed.
lowerCamelCase__ = {
    """configuration_blenderbot_small""": [
        """BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BlenderbotSmallConfig""",
        """BlenderbotSmallOnnxConfig""",
    ],
    """tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["""BlenderbotSmallTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BlenderbotSmallForCausalLM""",
        """BlenderbotSmallForConditionalGeneration""",
        """BlenderbotSmallModel""",
        """BlenderbotSmallPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """TFBlenderbotSmallForConditionalGeneration""",
        """TFBlenderbotSmallModel""",
        """TFBlenderbotSmallPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """FlaxBlenderbotSmallForConditionalGeneration""",
        """FlaxBlenderbotSmallModel""",
        """FlaxBlenderbotSmallPreTrainedModel""",
    ]

# NOTE(review): every branch above re-binds the single obfuscated name
# ``lowerCamelCase__`` — in the canonical pattern these are separate keys added
# to the import structure; verify against the original file.
if TYPE_CHECKING:
    # Static-typing-only imports mirroring the structure above.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-module import structure for DPT: submodule name -> exported public names.
# NOTE(review): the vision/torch branches re-bind the single obfuscated name
# ``lowerCamelCase__`` instead of adding keys to the structure — verify against
# the canonical lazy-init pattern.
lowerCamelCase__ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["""DPTFeatureExtractor"""]
    lowerCamelCase__ = ["""DPTImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static-typing-only imports mirroring the structure above.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# NOTE(review): all three statements were obfuscated to the single name ``_a``
# (each re-binding the previous one). `logger` is restored from its use in
# main(); `MODEL_CONFIG_CLASSES` from its read on the next line; `MODEL_TYPES`
# follows the standard run_language_modeling convention — confirm.
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
    """Arguments for which model/config/tokenizer to fine-tune (or train from scratch).

    NOTE(review): all five fields share the obfuscated name
    ``_SCREAMING_SNAKE_CASE`` (each re-binds the previous one) and
    ``default=__lowercase`` references an undefined name (presumably a literal
    ``None`` originally). Left byte-identical; comments only.
    """

    # model_name_or_path: checkpoint to initialize weights from (None = scratch)
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    # model_type: architecture to use when training from scratch
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__lowercase )} , )
    # config_name: config override, when different from model_name
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    # tokenizer_name: tokenizer override, when different from model_name
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    # cache_dir: where downloaded pretrained artifacts are stored
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class _lowercase :
    """Arguments describing the training/eval data and masking strategy.

    NOTE(review): every field shares the obfuscated name
    ``_SCREAMING_SNAKE_CASE`` (each re-binds the previous one) and several
    defaults reference the undefined name ``__lowercase`` (presumably literal
    defaults originally). Left byte-identical; comments only.
    """

    # train_data_file: single training text file
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "The input training data file (a text file)."} )
    # train_data_files: glob of several training files
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    # eval_data_file: file to evaluate perplexity on
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    # train/eval ref files for Chinese whole-word masking
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    _SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=__lowercase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    # line_by_line: treat each line as a distinct sequence
    _SCREAMING_SNAKE_CASE : bool = field(
        default=__lowercase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    # mlm: masked-LM loss instead of causal LM
    _SCREAMING_SNAKE_CASE : bool = field(
        default=__lowercase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    _SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={"help": "Whether ot not to use whole word mask."} )
    # mlm_probability: fraction of tokens masked for MLM
    _SCREAMING_SNAKE_CASE : float = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    # plm_probability: span-to-context ratio for permutation LM
    _SCREAMING_SNAKE_CASE : float = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    # block_size: sequence length after tokenization (-1 = model max length)
    _SCREAMING_SNAKE_CASE : int = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    _SCREAMING_SNAKE_CASE : bool = field(
        default=__lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _a (args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ) -> Optional[Any]:
    """Build the train or eval dataset described by `args`.

    NOTE(review): the original signature repeated the obfuscated name
    ``lowercase__`` for every parameter (a SyntaxError); names are restored from
    their uses in the body (``args.*``, ``tokenizer=``, ``evaluate``,
    ``cache_dir``).

    Returns a LineByLine*/TextDataset, or a ConcatDataset over several files.
    """

    def _dataset(file_path , ref_path=None ):
        # One dataset per input file: line-by-line vs. contiguous-block tokenization.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        # BUGFIX: the comprehension previously ignored the loop variable `f`.
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main():
    """Train and/or evaluate a language model (CLM/MLM/PLM) with the HF Trainer.

    Returns:
        dict: evaluation results (currently `{"perplexity": ...}`) when --do_eval is set,
        otherwise an empty dict.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.'
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.'
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # NOTE(review): chunk read `fpaa`, an obfuscation of `fp16` — confirm attribute name
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name'
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).'
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        # XLNet trains with permutation language modeling and needs its own collator.
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    """Entry point for `xla_spawn` (TPUs); `index` is the process ordinal (unused)."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase):
    """Tests for the slow and fast Longformer (byte-level BPE) tokenizers.

    The scrambled original referenced an undefined base `UpperCamelCase` and named
    every method `lowercase_` (so they shadowed each other, and the mixin's calls to
    `get_tokenizer`/`get_input_output_texts` failed). Names below are restored to
    match what `TokenizerTesterMixin` and the test runner actually look up.
    """

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        # Helper (no `test_` prefix, so not auto-discovered): integration check
        # against the real pretrained vocabulary.
        # NOTE(review): `add_special_tokens` was obfuscated in the source; False
        # matches the upstream helper — confirm against the repository.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=False),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`

                text = F"""{text_of_1_token} {text_of_1_token}"""

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F""" {text}"""

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 576 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( SchedulerCommonTest ):
    """Tests for `DDIMParallelScheduler` (batched / parallel DDIM sampling).

    The scrambled original inherited from an undefined `__UpperCAmelCase`, named both
    class attributes `a_` (so the second shadowed the first) and every method
    `lowercase` (so only the last survived and `self.get_scheduler_config` /
    `self.full_loop` were unresolvable). Names are restored to what the base
    `SchedulerCommonTest` and the test runner look up.
    """

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))

    def get_scheduler_config(self, **kwargs):
        # Baseline config; individual tests override fields via **kwargs.
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        # Run a deterministic 10-step sampling loop and return the final sample.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    Args:
        n_element: how many terms of the series to produce; must be >= 1.

    Raises:
        ValueError: if `n_element` is less than 1.

    >>> hamming(10)
    [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
    """
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError('a should be a positive number')

    hamming_list = [1]
    # i, j, k track the next candidate multiple of 2, 3 and 5 respectively.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
_snake_case : List[Any] = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_snake_case : str = hamming(int(n))
print('-----------------------------------------------------')
print(F"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Base path of the library source whose config docstrings are checked.
PATH_TO_TRANSFORMERS = """src/transformers"""

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    """transformers""",
    os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# Raw string added: the original pattern relied on `\[`/`\(` being passed through,
# which raises invalid-escape warnings on modern Python.
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

# Config classes exempt from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    """CLIPConfigMixin""",
    """DecisionTransformerConfigMixin""",
    """EncoderDecoderConfigMixin""",
    """RagConfigMixin""",
    """SpeechEncoderDecoderConfigMixin""",
    """VisionEncoderDecoderConfigMixin""",
    """VisionTextDualEncoderConfigMixin""",
}
def check_config_docstrings_have_checkpoints():
    """Verify every config class docstring links a valid HF Hub checkpoint.

    Raises:
        ValueError: listing every config class (not explicitly ignored) whose
        docstring contains no `[name](https://huggingface.co/name)` link.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
import requests

# The BeautifulSoup HTML parser lives in the `bs4` package ("bsa" was a typo).
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count text for a paper from a Google Scholar lookup page.

    The original signature declared the same placeholder name for both parameters,
    which is a SyntaxError; the caller passes `params` by keyword, so these names
    match the existing call site.

    Args:
        base_url: Google Scholar lookup URL.
        params: query parameters identifying the paper (title, journal, year, ...).

    Returns:
        The text of the third anchor in the result footer (the "Cited by N" link).
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    # The result footer ("gs_fl") holds save / cite / cited-by anchors in order.
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
UpperCamelCase__ : Tuple = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2_018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (base case of Strassen's recursion).

    Raises:
        Exception: if either matrix is not exactly 2x2.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Return the element-wise sum of two equally-shaped matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Return the element-wise difference `matrix_a - matrix_b` of two equally-shaped matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-dimensioned square matrix into its four quadrants.

    Returns:
        (top_left, top_right, bot_left, bot_right)

    Raises:
        Exception: if either dimension of `a` is odd.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (row count, column count) of `matrix`; assumes at least one row."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print `matrix` one row per line."""
    print('\n'.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two equal-size power-of-two square matrices using
    Strassen's seven-multiplication scheme.

    Callers are expected to pre-pad the operands (see ``strassen``); the
    recursion bottoms out at 2x2 with the schoolbook product.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    # Split each operand into four quadrants.
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # Strassen's seven recursive products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    # Recombine the seven products into the result's four quadrants.
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible (not necessarily power-of-two)
    dimensions with Strassen's algorithm.

    Both operands are zero-padded up to the next power-of-two square size,
    multiplied with ``actual_strassen``, and the padding is stripped from the
    result.  Note: 1x1 inputs are not supported (the 2x2 base case is the
    smallest the recursion handles).

    Raises:
        Exception: if the inner dimensions do not match
            (``cols(matrix1) != rows(matrix2)``).
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    # Bug fix: the previous version returned the *operands* ``[matrix1, matrix2]``
    # whenever both inputs were square instead of multiplying them.  Square
    # matrices now flow through the normal pad/multiply/strip path (the padding
    # loops are no-ops when the size is already a power of two).
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Copy the rows so zero-padding does not mutate the caller's matrices.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros: the true result is
    # dimension1[0] rows x dimension2[1] columns.
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix and print the 10x4 product.
    # Fix: both operands were previously bound to the *same* mangled name and
    # the call referenced an undefined variable.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
snake_case = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 535 | 1 |
"""Lazy import structure for the CLIPSeg model (configuration, processing, modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public names it exports; consumed by _LazyModule.
# Fix: this dict was previously bound to a throwaway name and then clobbered
# by the modeling list, and ``_import_structure`` was referenced but never
# defined, so _LazyModule received a dangling name.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

# The modeling symbols require torch; register them only when it is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
    """CPU tests for AudioDiffusionPipeline (vqvae/unet/mel/scheduler wiring).

    NOTE(review): every method below is named ``UpperCamelCase`` (names look
    machine-mangled), so each definition overwrites the previous one and only
    the last one survives on the class.  The test body references
    ``self.dummy_unet`` / ``self.dummy_unet_condition`` /
    ``self.dummy_vqvae_and_unet`` — presumably the original names of the
    properties below — which do not exist as written.  TODO: restore names.
    """
    def UpperCamelCase( self ) -> List[str]:
        """Free Python and CUDA memory between tests (likely originally ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def UpperCamelCase( self ) -> List[Any]:
        """Small deterministic 2-D UNet fixture (1-channel 32x64 sample)."""
        torch.manual_seed(0 )
        lowerCamelCase_ = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
        # NOTE(review): the model is bound to ``lowerCamelCase_`` but ``model``
        # is returned — NameError as written; the target was presumably ``model``.
        return model
    @property
    def UpperCamelCase( self ) -> int:
        """Small deterministic conditional 2-D UNet fixture (cross_attention_dim=10)."""
        torch.manual_seed(0 )
        lowerCamelCase_ = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
        # NOTE(review): same mismatch — ``model`` is not the assignment target.
        return model
    @property
    def UpperCamelCase( self ) -> Dict:
        """(vqvae, unet) fixture pair for the latent audio-diffusion test."""
        torch.manual_seed(0 )
        lowerCamelCase_ = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
        lowerCamelCase_ = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
        # NOTE(review): ``vqvae``/``unet`` were presumably the assignment targets above.
        return vqvae, unet
    @slow
    def UpperCamelCase( self ) -> str:
        """End-to-end pipeline test: unconditional, vqvae+img2img, and encoding-
        conditioned generation, each checked against pinned pixel slices.

        NOTE(review): many call arguments below are the mangled placeholder
        ``SCREAMING_SNAKE_CASE_`` and several locals (``pipe``, ``output``,
        ``audio``, ``image`` …) are read but never assigned under those names
        — the test cannot run as written.
        """
        lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase_ = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        lowerCamelCase_ = DDPMScheduler()
        lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
        lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
        lowerCamelCase_ = output.audios[0]
        lowerCamelCase_ = output.images[0]
        lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
        lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
        lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        lowerCamelCase_ = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        lowerCamelCase_ = DDIMScheduler()
        lowerCamelCase_ = self.dummy_vqvae_and_unet
        lowerCamelCase_ = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        np.random.seed(0 )
        lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
        lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
        lowerCamelCase_ = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        lowerCamelCase_ = self.dummy_unet_condition
        lowerCamelCase_ = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        np.random.seed(0 )
        lowerCamelCase_ = torch.rand((1, 1, 10) )
        lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = output.images[0]
        lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    """GPU smoke test loading the pretrained ``teticio/audio-diffusion-ddim-256``
    pipeline and checking a pinned pixel slice of the generated spectrogram.

    NOTE(review): this class rebinds the name ``UpperCAmelCase`` used by the
    CPU test class above (names look machine-mangled), shadowing it.
    """
    def UpperCamelCase( self ) -> Tuple:
        """Free Python and CUDA memory between tests (likely originally ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase( self ) -> Union[str, Any]:
        """Generate one sample and compare audio shape, image size and a pinned
        10-pixel slice.

        NOTE(review): arguments are the mangled ``SCREAMING_SNAKE_CASE_`` and
        locals (``pipe``, ``output``, ``audio``, ``image`` …) are read but
        never assigned under those names — the test cannot run as written.
        """
        lowerCamelCase_ = torch_device
        lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
        lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
        lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = output.audios[0]
        lowerCamelCase_ = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Sort ``list_data`` in place with a recursive bubble sort and return it.

    Args:
        list_data: list of mutually comparable items; mutated in place.
        length: size of the still-unsorted prefix; callers normally omit it
            (0 means "use the whole list").

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    """
    # Fixes: the mangled version declared both parameters with the same name
    # (a SyntaxError), and the "swap" assigned the tuple
    # ``(list_data[i + 1], list_data[i])`` to a single throwaway variable
    # instead of exchanging the two elements, so nothing was ever sorted.
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # After each full pass the largest element has bubbled to position
    # ``length - 1``; recurse on the shorter prefix until a pass is swap-free.
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
class EditDistance:
    """Levenshtein edit distance with two strategies: memoized top-down
    recursion and a bottom-up DP table.

    Use:
        solver = EditDistance()
        result = solver.min_dist_top_down(word1, word2)

    Fixes: the mangled version named all three public/private methods ``__A``
    (each overwriting the last), never defined the helper that
    ``self.__min_dist_top_down_dp`` resolves to under name mangling, and
    discarded every DP result into throwaway locals.  The ``__main__`` block
    below instantiates ``EditDistance`` and calls ``min_dist_top_down`` /
    ``min_dist_bottom_up``, which fixes which names belong here.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []  # memo/DP table, (re)built by each public entry point

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        """Edit distance of word1[:m+1] vs word2[:n+1], memoized in self.dp."""
        if m == -1:
            return n + 1  # word1 exhausted: insert remaining n+1 chars
        elif n == -1:
            return m + 1  # word2 exhausted: delete remaining m+1 chars
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Return the edit distance between ``word1`` and ``word2`` (memoized recursion)."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Return the edit distance between ``word1`` and ``word2`` (iterative DP table)."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration for BlenderbotSmall encoder/decoder models.

    Holds the transformer hyper-parameters and forwards the special-token ids
    to ``PretrainedConfig``.  Fixes: the mangled version named every
    ``__init__`` parameter ``SCREAMING_SNAKE_CASE_`` (duplicate argument names
    are a SyntaxError), assigned all three class attributes to one mangled
    name, and subclassed the undefined ``snake_case`` instead of the imported
    ``PretrainedConfig``.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case : int = {0: """batch"""}
snake_case : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case : Any = {0: """batch""", 1: """decoder_sequence"""}
snake_case : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ ,direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case , snake_case : int = self.num_layers
for i in range(SCREAMING_SNAKE_CASE_ ):
snake_case : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case : Any = {0: """batch""", 2: """past_sequence + sequence"""}
else:
snake_case : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case : Union[str, Any] = super().outputs
else:
snake_case : Optional[Any] = super(SCREAMING_SNAKE_CASE_ ,self ).outputs
if self.use_past:
snake_case , snake_case : List[str] = self.num_layers
for i in range(SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case : str = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
snake_case : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Generate decoder inputs
snake_case : Optional[int] = seq_length if not self.use_past else 1
snake_case : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
snake_case : int = dict(**SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : List[Any] = common_inputs["""input_ids"""].shape
snake_case : Tuple = common_inputs["""decoder_input_ids"""].shape[1]
snake_case , snake_case : Optional[int] = self.num_attention_heads
snake_case : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Optional[Any] = decoder_seq_length + 3
snake_case : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case : Dict = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )] ,dim=1 )
snake_case : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case , snake_case : List[str] = self.num_layers
snake_case : Dict = min(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = max(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) - min_num_layers
snake_case : Union[str, Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(SCREAMING_SNAKE_CASE_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
) )
# TODO: test this.
snake_case : List[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) )
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
snake_case : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case : List[Any] = seqlen + 2
snake_case , snake_case : Tuple = self.num_layers
snake_case , snake_case : Optional[int] = self.num_attention_heads
snake_case : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Any = common_inputs["""attention_mask"""].dtype
snake_case : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,dtype=SCREAMING_SNAKE_CASE_ )] ,dim=1 )
snake_case : Dict = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(SCREAMING_SNAKE_CASE_ )
]
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : Tuple = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : List[str] = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=SCREAMING_SNAKE_CASE_ )
# Generate dummy inputs according to compute batch and sequence
snake_case : str = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case : Optional[int] = dict(tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ) )
return common_inputs
def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
    """Dispatch dummy-input generation to the task-specific helper.

    seq2seq/default tasks get encoder+decoder inputs, causal-lm gets
    decoder-only inputs, everything else falls back to plain
    classification/QA style inputs.
    """
    if self.task in ["default", "seq2seq-lm"]:
        common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
    elif self.task == "causal-lm":
        common_inputs = self._generate_dummy_inputs_for_causal_lm(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
    else:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
    return common_inputs
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
    """Flatten one past_key_values tensor into the ONNX output dict.

    seq2seq tasks use the seq2seq flattening from the direct base class;
    causal-lm bypasses it and uses the plain OnnxConfigWithPast flattening.
    """
    if self.task in ["default", "seq2seq-lm"]:
        flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
    else:
        # NOTE(review): the class named here was destroyed by obfuscation; the
        # seq2seq ONNX config base is assumed — confirm against the real class name.
        flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
            flattened_output, name, idx, t
        )
class OverFlowError(Exception):
    """Raised when a queue has reached its maximum capacity."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""
class FixedPriorityQueue:
    """Priority queue with three fixed priority levels (0 is highest).

    Each priority level is a FIFO list capped at 100 elements. ``dequeue``
    returns the oldest element of the highest non-empty priority.
    """

    def __init__(self) -> None:
        # One FIFO list per priority level 0, 1, 2.
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Add ``data`` at the given priority.

        Raises:
            OverflowError: if that priority level already holds 100 items.
            ValueError: if ``priority`` is not 0, 1 or 2.
        """
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the front element of the highest-priority non-empty queue.

        Raises:
            UnderFlowError: if every priority level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Priority queue where the element's own value is its priority.

    Smaller values are dequeued first. Capacity is capped at 100 elements.
    """

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        """Append ``data``.

        Raises:
            OverFlowError: if the queue already holds 100 items.
        """
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element.

        Raises:
            UnderFlowError: if the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue():
    """Demonstrate FixedPriorityQueue: enqueue nine items, then dequeue.

    Note: ten dequeues are issued against nine items, so the final dequeue
    intentionally raises UnderFlowError to demonstrate underflow handling.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demonstrate ElementPriorityQueue: enqueue nine items, then dequeue.

    Note: ten dequeues are issued against nine items, so the final dequeue
    intentionally raises UnderFlowError to demonstrate underflow handling.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 415 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for UnCLIPImageVariationPipeline built from tiny dummy models.

    NOTE(review): obfuscation destroyed several literal arguments in this class
    (booleans/None obfuscated to identifiers); ``disable=None`` and the
    ``pil_image`` flags below follow the upstream diffusers test — confirm.
    """

    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    # xformers attention is not exercised by this tiny-model suite.
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently from the first model on purpose
        torch.manual_seed(1)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all tiny sub-models/schedulers needed by the pipeline."""
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000
        )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Build pipeline call kwargs; optionally convert the tensor image to PIL."""
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            # prepare_latents only reads init_noise_sigma from the scheduler
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released Karlo image-variation weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )
        # fp16 keeps the checkpoint within single-GPU memory
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(input_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 392 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of *vector* (dot product with itself)."""
    return np.dot(vector, vector)
class SVC:
    """Support Vector Classifier trained via Wolfe's dual formulation.

    Supports a linear kernel and an RBF kernel (requires ``gamma > 0``).
    Labels are expected to be +1 / -1; ``predict`` returns +1 or -1.
    """

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel: plain dot product."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        """Fit the classifier on *observations* with +1/-1 labels *classes*.

        Solves the dual problem numerically with scipy's ``minimize``.
        """
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Negated dual objective (we minimize instead of maximize)."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Classify *observation*, returning +1 or -1."""
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 392 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """Fast CPU test for KarrasVePipeline with a tiny unconditional UNet."""

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        # re-seed so the dict and tuple outputs are produced from identical noise
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(
            num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the released ncsnpp-celebahq-256 weights."""

    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 535 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration for SegFormer models.

    Defaults correspond to the SegFormer-B0 architecture.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # deprecated flag kept for backward compatibility (see warning above)
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer (image input only)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported model against PyTorch.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 535 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Return (start, end, sum) of a maximum-sum contiguous subarray of
    ``arr[low..high]`` (inclusive), via divide and conquer in O(n log n).

    Returns ``(None, None, 0)`` for an empty array.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    if right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Return the best subarray that crosses ``mid``: grow left from mid and
    right from mid+1, keeping the maximal prefix sums on each side.
    """
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Run max_subarray on a random array of *input_size* positive ints and
    return the elapsed wall-clock time in seconds.
    """
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark max_subarray over growing input sizes, print a table, and
    plot runtime vs. input size with matplotlib.
    """
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 705 |
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *n* (Project Euler #10).

    Uses a Sieve of Eratosthenes: ``is_composite[i] == 0`` means i is prime.
    """
    if n < 2:
        # no primes below 2
        return 0
    is_composite = [0 for _ in range(n + 1)]
    # 0 and 1 are not prime
    is_composite[0] = 1
    is_composite[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if is_composite[i] == 0:
            # mark multiples of the prime i, starting at i*i
            for j in range(i * i, n + 1, i):
                is_composite[j] = 1
    total = 0
    for i in range(n):
        if is_composite[i] == 0:
            total += i
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
| 560 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """Configuration for Marian encoder-decoder translation models.

    Defaults correspond to the Helsinki-NLP/opus-mt-en-de checkpoint.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # decoder falls back to the shared vocabulary when not given its own
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class lowerCAmelCase__ ( _lowerCamelCase ):
    """ONNX export configuration for a seq2seq model: declares dynamic input/output
    axes and generates dummy inputs for the "default"/"seq2seq-lm" and
    "causal-lm" export tasks.

    NOTE(review): in the obfuscated source every method was named
    ``_lowerCAmelCase`` (later defs overwrote earlier ones) and every parameter
    ``_SCREAMING_SNAKE_CASE`` (duplicate argument names are a SyntaxError).
    Method and parameter names below are restored from the internal call sites
    visible in the bodies (e.g. ``self._generate_dummy_inputs_for_default_and_seqaseq_lm``)
    and from the BartOnnxConfig code the "Copied from" comments reference.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each ONNX input name to its dynamic axes, depending on the task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                # With cached key/values the decoder consumes one new token per step.
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Output dynamic axes; causal-lm with cache also exposes present key/values."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # NOTE(review): upstream skips the seq2seq parent here via
            # super(OnnxConfigWithPast, self); that name must be imported at the
            # (unseen) top of this file — confirm the import.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy encoder+decoder inputs, with zero-filled past key/values when caching."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy decoder-only inputs, extending the attention mask over the past length."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a batch of unk-token sentences sized to the effective batch/sequence."""
        # If dynamic axis (-1) we forward with a fixed dimension to avoid ONNX optimizations
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation by export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one past key/value tensor into the output dict, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            # For causal-lm, skip the seq2seq parent's implementation.
            flattened_output = super(_lowerCamelCase, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1e-4
| 265 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Optional[int] = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCamelCase = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCamelCase = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCamelCase = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase ( UpperCamelCase__ ):
    """Normalize free text for answer comparison.

    Lower-cases the input, strips all ASCII punctuation, removes the English
    articles "a"/"an"/"the", and collapses whitespace runs to single spaces.
    """
    lowered = UpperCamelCase__.lower()
    punctuation = set(string.punctuation)
    without_punct = "".join(ch for ch in lowered if ch not in punctuation)
    without_articles = re.sub(r'\b(a|an|the)\b', ' ', without_punct, flags=re.UNICODE)
    return " ".join(without_articles.split())
def UpperCAmelCase ( a_gold , a_pred ):
    """Return 1 if the two answers are equal after normalization, else 0.

    Fix: both parameters in the original were named ``UpperCamelCase__``,
    which is a SyntaxError (duplicate argument names); they are renamed here.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def UpperCAmelCase ( predictions , references ):
    """Exact-match score (0-100): a prediction counts if it matches ANY of its references.

    Fix: both parameters in the original were named ``UpperCamelCase__``
    (duplicate argument names are a SyntaxError); renamed to match usage.
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def UpperCAmelCase ( sgrams , cgrams , rgramslist , numref ):
    """SARI n-gram sub-scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.
    Returns:
        (keepscore, delscore_precision, addscore) tuple.

    Fix: the original signature repeated one parameter name four times
    (a SyntaxError) and the obfuscation collapsed distinct counters into one
    name; identifiers are restored from the body's surviving references
    (``rgramslist``, ``numref``) and the upstream SARI implementation.
    """
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    # Replicate source counts once per reference so intersections are comparable.
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramall)
    addgramcounterall = set(rgramall) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def UpperCAmelCase ( ssent , csent , rsents ):
    """Sentence-level SARI: average of 1- to 4-gram keep/delete/add F-scores.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate/predicted sentence.
        rsents: list of reference sentences.

    Fix: the original signature repeated one parameter name three times
    (a SyntaxError) and the obfuscation collapsed the distinct 2/3/4-gram
    lists into single names; names are restored from the upstream SARI code.
    """
    numref = len(rsents)

    s1grams = ssent.split(' ')
    c1grams = csent.split(' ')
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(' ')
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + ' ' + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2] + ' ' + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + ' ' + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2] + ' ' + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + ' ' + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2] + ' ' + c1grams[i + 3]
            c4grams.append(c4gram)

    # Per-order keep/delete/add sub-scores, then average over n-gram orders.
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def UpperCAmelCase ( sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    """Normalize a sentence with a sacreBLEU/Moses tokenizer.

    Args:
        sentence: input text.
        lowercase: lower-case before tokenizing (default True).
        tokenizer: one of "13a", "intl", "moses", "penn"; anything else is a no-op.
        return_str: if False, return a list of tokens instead of a string.

    Fix: the original signature repeated one parameter name four times (a
    SyntaxError).  NOTE(review): the boolean keyword arguments passed to the
    Moses tokenizers (return_str=True, escape=False) are restored per the
    upstream wiki_split metric — confirm against it.
    """
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry; keep both code paths.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def UpperCAmelCase ( sources , predictions , references ):
    """Corpus-level SARI (0-100): mean sentence SARI over normalized inputs.

    Raises:
        ValueError: if the three lists differ in length.

    Fix: the original signature repeated one parameter name three times
    (duplicate argument names are a SyntaxError); renamed per usage.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('Sources length must match predictions and references lengths.')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="exp" , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
    """WikiSplit metric: combines SARI, sacreBLEU and exact-match for text simplification.

    NOTE(review): in the obfuscated source both methods were named
    ``snake_case__`` (the second definition overwrote the first) and the compute
    method's three parameters shared one name (a SyntaxError).  The
    ``datasets.Metric`` API requires ``_info`` and ``_compute``; names restored
    accordingly.
    """

    def _info(self):
        """Declare input features (prediction string + sequence of reference strings)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=[
                'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
                'https://github.com/cocoxu/simplification/blob/master/SARI.py',
                'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
                'https://github.com/mjpost/sacreBLEU',
            ],
            reference_urls=[
                'https://www.aclweb.org/anthology/Q16-1029.pdf',
                'https://github.com/mjpost/sacreBLEU',
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return {'sari', 'sacrebleu', 'exact'} scores for the given batch."""
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
| 707 | """simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCamelCase__( __A ):
    # Structural interface for an audio filter: anything exposing a per-sample
    # ``process`` method.  NOTE(review): the base name ``__A`` is not defined in
    # this chunk — presumably the obfuscated alias of ``typing.Protocol``
    # (imported above); confirm upstream.
    def snake_case__ ( self ,__UpperCAmelCase ) -> float:
        """Process one audio sample; this stub is a pass-through returning 0.0."""
        return 0.0
def UpperCAmelCase ( fft_results , samplerate ):
    """Return (lowest, highest) dB bounds over the positive-frequency half of an FFT.

    The DC bin and the bins at/above samplerate//2 - 1 are excluded; bounds are
    clamped to at most -20 (lower) and at least 20 (upper) so small responses
    still plot readably.

    Fix: both parameters in the original were named ``UpperCamelCase__``
    (duplicate argument names are a SyntaxError); names restored from the
    body's surviving references.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def UpperCAmelCase ( filter_type , samplerate ):
    """Plot a filter's frequency response (gain in dB on a log frequency axis).

    Feeds a unit impulse through ``filter_type.process`` and FFTs the response.

    Fixes: both parameters shared one name (SyntaxError); the impulse samples
    were not actually passed to ``process``; and ``np.logaa`` (nonexistent,
    obfuscated) is restored to ``np.log10``.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()
def UpperCAmelCase ( filter_type , samplerate ):
    """Plot a filter's phase response (unwrapped radians on a log frequency axis).

    Fixes: both parameters shared one name (SyntaxError) and the impulse
    samples were not actually passed to ``process``.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
| 536 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# Release-gated SageMaker distributed-training smoke tests.  Each parameterized
# class variant launches one training job (PyTorch SMDataParallel, PyTorch DDP,
# TensorFlow distributed) and checks runtime/accuracy/loss thresholds.
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_ddp.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf_dist.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
        },
    ] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """SageMaker multi-instance data-parallel training smoke test.

    NOTE(review): all four methods below were obfuscated to one name
    (``snake_case_``), so at class-creation time later defs overwrite earlier
    ones; several identifiers inside the bodies (``lowerCAmelCase__``,
    ``instance_count``, ``job_name``) are likewise unresolved leftovers of the
    obfuscation — confirm the originals before relying on this code.
    """
    def snake_case_ ( self):
        # Setup: copy the GLUE example script into the test workspace (PyTorch only)
        # and ensure the sm_env fixture attached the environment.
        # NOTE(review): ``check=lowerCAmelCase__`` references an undefined name
        # (presumably the obfuscated ``True``) — confirm.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=lowerCAmelCase__ , )
        assert hasattr(self , """env""")
    def snake_case_ ( self , lowerCAmelCase__):
        # Build a HuggingFace estimator for the given instance count.
        # NOTE(review): the job-name f-string reads ``instance_count``, which is
        # not defined in this scope (obfuscated parameter name) — confirm.
        __SCREAMING_SNAKE_CASE = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: SMDataParallel unless the script drives DDP itself.
        __SCREAMING_SNAKE_CASE = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCAmelCase__ , instance_count=lowerCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase__ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase__ , py_version="""py36""" , )
    def snake_case_ ( self , lowerCAmelCase__):
        # Export a finished job's CloudWatch metrics to CSV for inspection.
        # NOTE(review): the path f-string reads ``job_name``, undefined here
        # (obfuscated parameter name) — confirm.
        TrainingJobAnalytics(lowerCAmelCase__).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def snake_case_ ( self , lowerCAmelCase__):
        # create estimator
        __SCREAMING_SNAKE_CASE = self.create_estimator(lowerCAmelCase__)
        # run training
        estimator.fit()
        # result dataframe
        __SCREAMING_SNAKE_CASE = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
        __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __SCREAMING_SNAKE_CASE = (
            Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json" , """w""") as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCAmelCase__)
| 155 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
__SCREAMING_SNAKE_CASE = str(bin(UpperCamelCase_ ) )
binary_number += "0" * shift_amount
return binary_number
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
__SCREAMING_SNAKE_CASE = str(bin(UpperCamelCase_ ) )[2:]
if shift_amount >= len(UpperCamelCase_ ):
return "0b0"
__SCREAMING_SNAKE_CASE = binary_number[: len(UpperCamelCase_ ) - shift_amount]
return "0b" + shifted_binary_number
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if number >= 0: # Get binary representation of positive number
__SCREAMING_SNAKE_CASE = """0""" + str(bin(UpperCamelCase_ ) ).strip("""-""" )[2:]
else: # Get binary (2's complement) representation of negative number
__SCREAMING_SNAKE_CASE = len(bin(UpperCamelCase_ )[3:] ) # Find 2's complement of number
__SCREAMING_SNAKE_CASE = bin(abs(UpperCamelCase_ ) - (1 << binary_number_length) )[3:]
__SCREAMING_SNAKE_CASE = (
"""1""" + """0""" * (binary_number_length - len(UpperCamelCase_ )) + binary_number
)
if shift_amount >= len(UpperCamelCase_ ):
return "0b" + binary_number[0] * len(UpperCamelCase_ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(UpperCamelCase_ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCamelCase (unittest.TestCase ):
    # NOTE(review): this class appears machine-mangled — every parameter of
    # ``__init__`` is named ``__magic_name__`` (duplicate parameter names are a
    # SyntaxError), all four methods share the name ``__snake_case`` (later
    # defs shadow earlier ones), and bodies assign to ``lowercase`` while
    # reading names (``parent``, ``batch_size``, ``pixel_values`` …) that are
    # never bound.  Comments below describe the apparent intent only: a tester
    # helper that builds ViT configs/inputs for the Flax test-suite class.
    def __init__( self :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any]=13 , __magic_name__ :List[str]=30 , __magic_name__ :List[str]=2 , __magic_name__ :Dict=3 , __magic_name__ :Optional[int]=True , __magic_name__ :int=True , __magic_name__ :Optional[int]=32 , __magic_name__ :Dict=5 , __magic_name__ :Optional[int]=4 , __magic_name__ :List[str]=37 , __magic_name__ :Any="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :int=10 , __magic_name__ :List[Any]=0.02 , ) ->str:
        # Apparently stored the tester configuration on ``self`` originally.
        lowercase : List[str] = parent
        lowercase : List[str] = batch_size
        lowercase : List[str] = image_size
        lowercase : Optional[int] = patch_size
        lowercase : str = num_channels
        lowercase : Optional[Any] = is_training
        lowercase : Union[str, Any] = use_labels
        lowercase : Union[str, Any] = hidden_size
        lowercase : Optional[int] = num_hidden_layers
        lowercase : str = num_attention_heads
        lowercase : str = intermediate_size
        lowercase : int = hidden_act
        lowercase : Union[str, Any] = hidden_dropout_prob
        lowercase : List[Any] = attention_probs_dropout_prob
        lowercase : Union[str, Any] = type_sequence_label_size
        lowercase : Optional[Any] = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowercase : Union[str, Any] = (image_size // patch_size) ** 2
        lowercase : Optional[Any] = num_patches + 1
    # Apparent intent: build a ViTConfig plus a random pixel_values batch
    # (``prepare_config_and_inputs`` in the original).
    def __snake_case ( self :List[str] ) ->Dict:
        lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase : Optional[int] = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
        return config, pixel_values
    # Apparent intent: run FlaxViTModel and check the last_hidden_state shape.
    def __snake_case ( self :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple ) ->Union[str, Any]:
        lowercase : Dict = FlaxViTModel(config=__magic_name__ )
        lowercase : str = model(__magic_name__ )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        lowercase : List[Any] = (self.image_size, self.image_size)
        lowercase : str = (self.patch_size, self.patch_size)
        lowercase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    # Apparent intent: run FlaxViTForImageClassification (RGB then greyscale)
    # and check the logits shape.
    def __snake_case ( self :Optional[Any] , __magic_name__ :Tuple , __magic_name__ :str ) ->Union[str, Any]:
        lowercase : Optional[int] = self.type_sequence_label_size
        lowercase : Tuple = FlaxViTForImageClassification(config=__magic_name__ )
        lowercase : Union[str, Any] = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowercase : List[Any] = 1
        lowercase : int = FlaxViTForImageClassification(__magic_name__ )
        lowercase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase : Union[str, Any] = model(__magic_name__ )
    # Apparent intent: pack (config, {"pixel_values": ...}) for the common test
    # mixin.  NOTE(review): the parenthesized annotated assignment below may not
    # even parse, and ``pixel_values``/``config``/``inputs_dict`` are unbound.
    def __snake_case ( self :Tuple ) ->int:
        lowercase : int = self.prepare_config_and_inputs()
        (
            lowercase
        ) : List[Any] = config_and_inputs
        lowercase : Dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class UpperCamelCase (__snake_case , unittest.TestCase ):
    # NOTE(review): mangled test-suite class — the base ``__snake_case`` is not
    # defined at module level (presumably FlaxModelTesterMixin originally), it
    # reuses the tester class's name, and every test method is named
    # ``__snake_case`` so later defs shadow earlier ones.  Several bodies read
    # ``__magic_name__`` with no such parameter in scope.  Comments describe
    # the apparent intent of each method.
    _SCREAMING_SNAKE_CASE : List[str] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    # Apparent setUp: build the model tester and a ConfigTester for ViTConfig.
    def __snake_case ( self :Optional[int] ) ->None:
        lowercase : List[Any] = FlaxViTModelTester(self )
        lowercase : Any = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
    # Run the shared config sanity checks.
    def __snake_case ( self :int ) ->int:
        self.config_tester.run_common_tests()
    # Forward-pass shape test via the model tester.
    def __snake_case ( self :Optional[int] ) ->List[str]:
        lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )
    # Image-classification head shape test via the model tester.
    def __snake_case ( self :Optional[Any] ) ->Any:
        lowercase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
    # Checks that the first argument of every model's __call__ is pixel_values.
    def __snake_case ( self :List[str] ) ->Optional[int]:
        lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase : Tuple = model_class(__magic_name__ )
            lowercase : List[Any] = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase : List[str] = [*signature.parameters.keys()]
            lowercase : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __magic_name__ )
    # Compares jitted vs. non-jitted forward outputs for every model class.
    # NOTE(review): ``model_jitted`` repeats the parameter name
    # ``__magic_name__`` — a SyntaxError in this mangled form.
    def __snake_case ( self :str ) ->Optional[Any]:
        lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ )
                lowercase : Optional[Any] = model_class(__magic_name__ )
                @jax.jit
                def model_jitted(__magic_name__ :int , **__magic_name__ :int ):
                    return model(pixel_values=__magic_name__ , **__magic_name__ )
                with self.subTest("""JIT Enabled""" ):
                    lowercase : Union[str, Any] = model_jitted(**__magic_name__ ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        lowercase : Dict = model_jitted(**__magic_name__ ).to_tuple()
                self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
                for jitted_output, output in zip(__magic_name__ , __magic_name__ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    # Slow test: load the pretrained checkpoint and run one dummy forward pass.
    @slow
    def __snake_case ( self :str ) ->Optional[Any]:
        for model_class_name in self.all_model_classes:
            lowercase : Union[str, Any] = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
            lowercase : int = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(__magic_name__ )
| 708 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class UpperCamelCase (__snake_case ):
    # NOTE(review): apparently an IterableDataset wrapper (DummyIterableDataset
    # in the original), mangled — the base ``__snake_case`` is undefined at
    # module level, and ``__init__`` reads an unbound ``data`` and stores it in
    # a local, so the ``self.data`` read by ``__iter__`` is never set.
    def __init__( self :List[str] , __magic_name__ :Dict ) ->Optional[int]:
        lowercase : int = data
    def __iter__( self :List[Any] ) ->Optional[Any]:
        # Yield the wrapped elements one by one.
        for element in self.data:
            yield element
def UpperCamelCase ( _A=True ) -> Any:
    """Apparent intent: create an Accelerator; ``_A`` toggles even_batches.

    NOTE(review): mangled — the Accelerator is bound to ``lowercase`` but
    ``accelerator`` is read afterwards, a NameError at runtime.  The assert
    documents that the script expects exactly two processes (two GPUs).
    """
    lowercase : Optional[int] = Accelerator(even_batches=_A )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def UpperCamelCase ( _A , _A , _A , _A = False ) -> Any:
    """Apparent intent: build and accelerator-prepare a DataLoader over range(n).

    NOTE(review): mangled — the parameter name ``_A`` is repeated (a
    SyntaxError) and the body reads ``iterable``/``accelerator``/``dl`` that
    are never bound; presumably (accelerator, dataset_size, batch_size,
    iterable=False) in the original — confirm against callers below.
    """
    if iterable:
        lowercase : Optional[Any] = DummyIterableDataset(torch.as_tensor(range(_A ) ) )
    else:
        lowercase : Optional[int] = TensorDataset(torch.as_tensor(range(_A ) ) )
    lowercase : Dict = DataLoader(_A , batch_size=_A )
    lowercase : Union[str, Any] = accelerator.prepare(_A )
    return dl
def UpperCamelCase ( _A , _A , _A , _A , _A , ) -> str:
    """Apparent intent: assert each process sees its expected batch sizes.

    NOTE(review): mangled — ``_A`` is repeated five times (a SyntaxError),
    ``create_dataloader`` does not exist under these mangled names, and
    ``dl``/``batch_sizes``/``accelerator`` are unbound.
    """
    lowercase : Optional[int] = create_dataloader(accelerator=_A , dataset_size=_A , batch_size=_A )
    lowercase : Optional[Any] = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def UpperCamelCase ( ) -> Dict:
    """Apparent test: with even_batches on (default), batch sizes match across
    processes for both dataset sizes.

    NOTE(review): mangled — ``create_accelerator`` /
    ``verify_dataloader_batch_sizes`` do not exist under these names, and the
    positional ``_A`` passed below is unbound at module level.
    """
    lowercase : Optional[Any] = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        _A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        _A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCamelCase ( ) -> str:
    """Apparent test: with even_batches disabled, the last process receives
    fewer/smaller batches instead of padded ones.

    NOTE(review): mangled — helper names and the ``_A`` argument are unbound.
    """
    lowercase : Dict = create_accelerator(even_batches=_A )
    verify_dataloader_batch_sizes(
        _A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        _A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCamelCase ( ) -> Optional[int]:
    """Apparent test: ``join_uneven_inputs`` lets DDP train on uneven batch
    counts — process 0 should see batches [0, 1], process 1 only [0].

    NOTE(review): mangled — bodies assign to ``lowercase`` while reading
    ``accelerator``/``ddp_model``/``dl``/``output``/``loss``/``batch_idxs``
    that are never bound.
    """
    lowercase : List[str] = create_accelerator(even_batches=_A )
    lowercase : List[str] = torch.nn.Linear(1 , 1 )
    lowercase : List[Any] = accelerator.prepare(_A )
    lowercase : Optional[int] = create_dataloader(_A , dataset_size=3 , batch_size=1 )
    lowercase : Any = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(_A ):
            lowercase : Union[str, Any] = ddp_model(batch[0].float() )
            lowercase : Optional[Any] = output.sum()
            loss.backward()
            batch_idxs.append(_A )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def UpperCamelCase ( _A ) -> Dict:
    """Apparent test: joining uneven inputs under a non-DDP distributed type
    only warns ("only supported for multi-GPU") instead of failing.

    NOTE(review): mangled — ``accelerator`` is read but the parameter is
    ``_A``; presumably the accelerator was the argument originally.
    """
    with warnings.catch_warnings(record=_A ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , _A )
        assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCamelCase ( ) -> Optional[Any]:
    """Apparent test: ``join_uneven_inputs(..., even_batches=...)`` overrides
    the dataloaders' even_batches inside the context and restores it after.

    NOTE(review): mangled — ``default_even_batches``/``overridden_even_batches``
    and the dataloader/model names read below are never bound.
    """
    lowercase : Union[str, Any] = True
    lowercase : str = False
    lowercase : Union[str, Any] = create_accelerator(even_batches=_A )
    lowercase : Optional[int] = torch.nn.Linear(1 , 1 )
    lowercase : Dict = accelerator.prepare(_A )
    lowercase : int = create_dataloader(_A , dataset_size=3 , batch_size=1 )
    lowercase : List[Any] = create_dataloader(_A , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ):
        lowercase : List[Any] = train_dl.batch_sampler.even_batches
        lowercase : Optional[int] = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCamelCase ( ) -> int:
    """Apparent test: the even_batches override still applies to the map-style
    dataloader when an iterable-style one is also prepared (iterable loaders
    have no batch_sampler, hence the tolerated AttributeError path).

    NOTE(review): mangled — the helper names and ``ddp_model``/``batch_dl``
    read below are never bound under these names.
    """
    lowercase : Optional[Any] = True
    lowercase : Dict = False
    lowercase : Union[str, Any] = create_accelerator(even_batches=_A )
    lowercase : Optional[Any] = torch.nn.Linear(1 , 1 )
    lowercase : int = accelerator.prepare(_A )
    create_dataloader(_A , dataset_size=3 , batch_size=1 , iterable=_A )
    lowercase : int = create_dataloader(_A , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ):
                lowercase : Any = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCamelCase ( ) -> Any:
    """Apparent test: overriding even_batches with only iterable dataloaders
    prepared emits the "only supported for map-style datasets" warning.

    NOTE(review): mangled — helper names and ``ddp_model`` are unbound.
    """
    lowercase : Optional[Any] = create_accelerator()
    lowercase : Optional[int] = torch.nn.Linear(1 , 1 )
    lowercase : List[Any] = accelerator.prepare(_A )
    create_dataloader(_A , dataset_size=3 , batch_size=1 , iterable=_A )
    with warnings.catch_warnings(record=_A ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ):
            pass
        assert issubclass(w[-1].category , _A )
        assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCamelCase ( ) -> List[str]:
    """Apparent entry point: run every even_batches test in sequence, then the
    non-DDP warning test under a temporarily forced FSDP distributed type
    (restored afterwards).

    NOTE(review): mangled — all test functions in this file share the name
    ``UpperCamelCase`` (only the last definition survives) so none of the
    names called below resolve, and the guard's ``main()`` is also undefined.
    """
    lowercase : List[Any] = create_accelerator()
    accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("""Run tests with even_batches disabled""" )
    test_can_disable_even_batches()
    accelerator.print("""Test joining uneven inputs""" )
    test_can_join_uneven_inputs()
    accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
    test_join_can_override_even_batches()
    accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("""Test join with non DDP distributed raises warning""" )
    lowercase : str = accelerator.state.distributed_type
    lowercase : int = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(_A )
    lowercase : Optional[Any] = original_state
if __name__ == "__main__":
    main()
| 348 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase )
class a_ ( lowerCamelCase ):
    # NOTE(review): mangled video-classification pipeline — two signatures
    # below repeat the parameter name ``_SCREAMING_SNAKE_CASE`` (a SyntaxError)
    # and the bodies assign to ``UpperCamelCase`` while reading names
    # (``top_k``, ``video``, ``videoreader`` …) that are never bound.
    # Apparent intent: classify a video by sampling frames with decord and
    # running them through an image model.
    def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
        """Initialise the pipeline; requires the ``decord`` video backend."""
        super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        requires_backends(self , """decord""" )
        self.check_model_type(_SCREAMING_SNAKE_CASE )
    def A__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
        """Split user kwargs into preprocess (num_frames, frame_sampling_rate)
        and postprocess (top_k) parameter dicts."""
        UpperCamelCase = {}
        if frame_sampling_rate is not None:
            UpperCamelCase = frame_sampling_rate
        if num_frames is not None:
            UpperCamelCase = num_frames
        UpperCamelCase = {}
        if top_k is not None:
            UpperCamelCase = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
        """Delegate to Pipeline.__call__ with the given video(s)."""
        return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 ) -> Union[str, Any]:
        """Preprocess: fetch the video if it is a URL, sample evenly spaced
        frames with decord, and run them through the image processor."""
        if num_frames is None:
            UpperCamelCase = self.model.config.num_frames
        if video.startswith("""http://""" ) or video.startswith("""https://""" ):
            UpperCamelCase = BytesIO(requests.get(_SCREAMING_SNAKE_CASE ).content )
        UpperCamelCase = VideoReader(_SCREAMING_SNAKE_CASE )
        videoreader.seek(0 )
        UpperCamelCase = 0
        UpperCamelCase = num_frames * frame_sampling_rate - 1
        UpperCamelCase = np.linspace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num=_SCREAMING_SNAKE_CASE , dtype=np.intaa )
        UpperCamelCase = videoreader.get_batch(_SCREAMING_SNAKE_CASE ).asnumpy()
        UpperCamelCase = list(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
        return model_inputs
    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Forward: plain model call on the preprocessed inputs."""
        UpperCamelCase = self.model(**_SCREAMING_SNAKE_CASE )
        return model_outputs
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ) -> List[str]:
        """Postprocess: softmax + top-k (PyTorch only) into a list of
        ``{"score": ..., "label": ...}`` dicts."""
        if top_k > self.model.config.num_labels:
            UpperCamelCase = self.model.config.num_labels
        if self.framework == "pt":
            UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
            UpperCamelCase ,UpperCamelCase = probs.topk(_SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        UpperCamelCase = scores.tolist()
        UpperCamelCase = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
| 301 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
    # NOTE(review): mangled MgpstrProcessor test-suite — every test method is
    # named ``A__`` (later defs shadow earlier ones) and bodies assign to
    # ``UpperCamelCase`` while reading names (``tokenizer``, ``processor``,
    # ``self.tmpdirname`` …) that are never bound.  Docstrings below describe
    # the apparent intent of each method.
    lowercase = ViTImageProcessor if is_vision_available() else None
    @property
    def A__ ( self ) -> Tuple:
        """Return the image-processor kwargs dict from the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def A__ ( self ) -> Union[str, Any]:
        """Apparent setUp: write a character vocab and a ViTImageProcessor
        config into a fresh temporary directory."""
        UpperCamelCase = (3, 32, 128)
        UpperCamelCase = tempfile.mkdtemp()
        # fmt: off
        UpperCamelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        UpperCamelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
        UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
        UpperCamelCase = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        UpperCamelCase = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Load an MgpstrTokenizer from the temporary directory."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Any:
        """Load a ViTImageProcessor from the temporary directory."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Apparent tearDown: remove the temporary directory."""
        shutil.rmtree(self.tmpdirname )
    def A__ ( self ) -> int:
        """Build one random RGB PIL image as test input."""
        UpperCamelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
        UpperCamelCase = Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) )
        return image_input
    def A__ ( self ) -> Optional[int]:
        """save_pretrained / from_pretrained round-trip with default kwargs."""
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        processor.save_pretrained(self.tmpdirname )
        UpperCamelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , _SCREAMING_SNAKE_CASE )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Round-trip with extra tokenizer/image-processor kwargs applied."""
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        processor.save_pretrained(self.tmpdirname )
        UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        UpperCamelCase = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
        UpperCamelCase = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , _SCREAMING_SNAKE_CASE )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> int:
        """Processor image path should match the raw image processor output."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        UpperCamelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def A__ ( self ) -> Optional[Any]:
        """Processor text path should match the raw tokenizer output."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = """test"""
        UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def A__ ( self ) -> str:
        """Text + image call yields pixel_values and labels; empty call raises."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = """test"""
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
        # test if it raises when no input is passed
        with pytest.raises(_SCREAMING_SNAKE_CASE ):
            processor()
    def A__ ( self ) -> List[Any]:
        """char_decode should agree with batch_decode minus the spaces."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        UpperCamelCase = processor.char_decode(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> int:
        """Images-only call keys should match processor.model_input_names."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = None
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def A__ ( self ) -> Any:
        """batch_decode over char/bpe/wp logits yields the expected keys."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = torch.randn(1 , 27 , 38 )
        UpperCamelCase = torch.randn(1 , 27 , 50257 )
        UpperCamelCase = torch.randn(1 , 27 , 30522 )
        UpperCamelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 301 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
    # NOTE(review): mangled text2text-generation pipeline test-suite — several
    # signatures repeat the parameter name ``__UpperCamelCase`` (a SyntaxError),
    # all methods are named ``_UpperCamelCase`` (later defs shadow earlier
    # ones), and bodies assign to ``_UpperCamelCase`` while reading names
    # (``generator``, ``outputs`` …) that are never bound.  Comments describe
    # the apparent intent of each method.
    snake_case__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    snake_case__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    # Apparent intent: build the pipeline plus sample prompts for the mixin.
    def _UpperCamelCase ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] ) -> Dict:
        _UpperCamelCase = TextaTextGenerationPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
        return generator, ["Something to write", "Something else"]
    # Apparent shared checks: output shape, num_return_sequences, batching,
    # and that a non-string input raises.
    def _UpperCamelCase ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] ) -> Dict:
        _UpperCamelCase = generator('''Something there''' )
        self.assertEqual(__UpperCamelCase , [{'''generated_text''': ANY(__UpperCamelCase )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
        _UpperCamelCase = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCamelCase )
        self.assertEqual(
            __UpperCamelCase , [
                [{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
                [{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
            ] , )
        _UpperCamelCase = generator(
            ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCamelCase )
        self.assertEqual(
            __UpperCamelCase , [
                [{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
                [{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
            ] , )
        with self.assertRaises(__UpperCamelCase ):
            generator(4 )
    # Apparent PT smoke test: deterministic generation, beam search, and
    # return_tensors output on a tiny random T5 checkpoint.
    @require_torch
    def _UpperCamelCase ( self : int ) -> Dict:
        _UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
        # do_sample=False necessary for reproducibility
        _UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
        _UpperCamelCase = 3
        _UpperCamelCase = generator(
            '''Something there''' , num_return_sequences=__UpperCamelCase , num_beams=__UpperCamelCase , )
        _UpperCamelCase = [
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': ''''''},
        ]
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
        _UpperCamelCase = generator('''This is a test''' , do_sample=__UpperCamelCase , num_return_sequences=2 , return_tensors=__UpperCamelCase )
        self.assertEqual(
            __UpperCamelCase , [
                {'''generated_token_ids''': ANY(torch.Tensor )},
                {'''generated_token_ids''': ANY(torch.Tensor )},
            ] , )
        _UpperCamelCase = generator.model.config.eos_token_id
        _UpperCamelCase = '''<pad>'''
        _UpperCamelCase = generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCamelCase , )
        self.assertEqual(
            __UpperCamelCase , [
                [
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                ],
                [
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                ],
            ] , )
    # Apparent TF smoke test mirroring the PT one.
    @require_tf
    def _UpperCamelCase ( self : Optional[int] ) -> Any:
        _UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
        # do_sample=False necessary for reproducibility
        _UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
| 702 | """simple docstring"""
import cmath
import math
def lowercase(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the apparent power S = V * I of an AC circuit as a complex number.

    The two magnitudes are combined with their phase angles (given in degrees)
    into phasors via ``cmath.rect`` and multiplied; the real part of the result
    is the active power and the imaginary part the reactive power under this
    plain V*I convention (no current conjugation).

    Args:
        voltage: voltage magnitude.
        current: current magnitude.
        voltage_angle: voltage phase angle, in degrees.
        current_angle: current phase angle, in degrees.

    Returns:
        The apparent power as a ``complex`` value.

    >>> lowercase(100, 5, 0, 0)
    (500+0j)
    """
    # Fix(review): the original signature repeated the parameter name four
    # times (a SyntaxError) and the body returned names that were never bound;
    # the distinct names below restore the intended computation.
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular (phasor) form.
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 342 | 0 |
"""simple docstring"""
lowerCAmelCase__ = range(2, 20 + 1)
lowerCAmelCase__ = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
    """Apparent ``next_term``: advance the digit-sum sequence from term i toward
    term n by replaying the largest cached "jump" for the current digit-sum
    split, recursing on smaller digit positions, and caching the new jump.

    NOTE(review): mangled — all four parameters share the name
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and the body assigns to
    ``lowerCAmelCase`` while reading ``a_i``/``k``/``i``/``n``/``jumps`` etc.;
    presumably (a_i, k, i, n) in the original.  Indentation of the two
    ``else`` branches below is a best-effort reconstruction.
    """
    # ds_b / c: digit sums of the high (>= position k) and low parts of a_i.
    lowerCAmelCase : Union[str, Any] = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) )
    lowerCAmelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) )
    lowerCAmelCase , lowerCAmelCase : List[Any] = 0, 0
    lowerCAmelCase : List[str] = n - i
    lowerCAmelCase : List[Any] = memo.get(SCREAMING_SNAKE_CASE )
    if sub_memo is not None:
        lowerCAmelCase : Tuple = sub_memo.get(SCREAMING_SNAKE_CASE )
        if jumps is not None and len(SCREAMING_SNAKE_CASE ) > 0:
            # find and make the largest jump without going over
            lowerCAmelCase : int = -1
            for _k in range(len(SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    lowerCAmelCase : str = _k
                    break
            if max_jump >= 0:
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = jumps[max_jump]
                # since the difference between jumps is cached, add c
                lowerCAmelCase : Any = diff + c
                for j in range(min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ):
                    lowerCAmelCase , lowerCAmelCase : Dict = divmod(SCREAMING_SNAKE_CASE , 1_0 )
                    if new_c > 0:
                        add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            lowerCAmelCase : str = []
    else:
        lowerCAmelCase : Tuple = {c: []}
        lowerCAmelCase : Union[str, Any] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            lowerCAmelCase , lowerCAmelCase : Optional[int] = next_term(SCREAMING_SNAKE_CASE , k - 1 , i + dn , SCREAMING_SNAKE_CASE )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        lowerCAmelCase , lowerCAmelCase : Any = compute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + dn , SCREAMING_SNAKE_CASE )
        diff += _diff
        dn += terms_jumped
    lowerCAmelCase : str = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    lowerCAmelCase : List[str] = 0
    while j < len(SCREAMING_SNAKE_CASE ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(SCREAMING_SNAKE_CASE , (diff, dn, k) )
    return (diff, dn)
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ):
    """Apparent ``compute``: generate sequence terms one at a time by adding
    the running digit sum into the digit list, returning the accumulated
    difference and the number of terms produced.

    NOTE(review): mangled — duplicate ``SCREAMING_SNAKE_CASE`` parameters
    (a SyntaxError); presumably (a_i, k, i, n) in the original, with the
    reads of ``a_i``/``k``/``i``/``n`` below referring to those.
    """
    if i >= n:
        return 0, i
    if k > len(SCREAMING_SNAKE_CASE ):
        a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    lowerCAmelCase : Any = i
    lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = 0, 0, 0
    for j in range(len(SCREAMING_SNAKE_CASE ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        lowerCAmelCase : Tuple = ds_c + ds_b
        diff += addend
        lowerCAmelCase : int = 0
        # Propagate the addend through the low digits with carry.
        for j in range(SCREAMING_SNAKE_CASE ):
            lowerCAmelCase : List[str] = a_i[j] + addend
            lowerCAmelCase , lowerCAmelCase : Any = divmod(SCREAMING_SNAKE_CASE , 1_0 )
            ds_c += a_i[j]
            if addend > 0:
                break
        if addend > 0:
            add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    return diff, i - start_i
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ):
    """Apparent ``add``: add an addend into a little-endian digit list starting
    at position k, propagating carries and appending new high digits.

    NOTE(review): mangled — duplicate ``SCREAMING_SNAKE_CASE`` parameters
    (a SyntaxError); presumably (digits, k, addend) in the original, with the
    reads of ``digits``/``addend``/``s``/``quotient`` referring to those.
    """
    for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
        lowerCAmelCase : Any = digits[j] + addend
        if s >= 1_0:
            lowerCAmelCase , lowerCAmelCase : List[str] = divmod(SCREAMING_SNAKE_CASE , 1_0 )
            lowerCAmelCase : Optional[int] = addend // 1_0 + quotient
        else:
            lowerCAmelCase : List[Any] = s
            lowerCAmelCase : Optional[Any] = addend // 1_0
        if addend == 0:
            break
    # Whatever carry remains becomes new most-significant digits.
    while addend > 0:
        lowerCAmelCase , lowerCAmelCase : Optional[int] = divmod(SCREAMING_SNAKE_CASE , 1_0 )
        digits.append(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0**1_5 ):
    """Apparent ``solution``: return the n-th term (default n = 10**15) of the
    digit-addition sequence by repeatedly jumping with ``next_term`` and then
    assembling the integer from the little-endian digit list.

    NOTE(review): mangled — the helpers are all named ``a__`` so ``next_term``
    is undefined here, and ``digits``/``i``/``dn``/``a_n`` are never bound
    under these names; the ``__main__`` guard's ``solution`` is likewise
    undefined.
    """
    lowerCAmelCase : Any = [1]
    lowerCAmelCase : int = 1
    lowerCAmelCase : int = 0
    while True:
        lowerCAmelCase , lowerCAmelCase : Optional[Any] = next_term(SCREAMING_SNAKE_CASE , 2_0 , i + dn , SCREAMING_SNAKE_CASE )
        dn += terms_jumped
        if dn == n - i:
            break
    lowerCAmelCase : Union[str, Any] = 0
    for j in range(len(SCREAMING_SNAKE_CASE ) ):
        a_n += digits[j] * 1_0**j
    return a_n
if __name__ == "__main__":
    print(F"{solution() = }")
| 645 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
# Module-level logger via transformers' logging wrapper (not stdlib logging).
logger = logging.get_logger(__name__)
# model-type string -> Flax model class name, one OrderedDict per task head.
# These *_NAMES tables are turned into lazy config->class mappings below.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)
# Lazily-populated config-class -> model-class mappings; these are the objects
# the FlaxAutoModel* classes below bind as their `_model_mapping`.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto classes: each one selects the concrete Flax model class from the
# config type via its `_model_mapping`; `auto_class_update` fills in the
# from_pretrained/from_config docstrings for the given head.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 645 | 1 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# Paths in documentation_tests.txt are relative to the repo root.
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    # Fail loudly on stale entries so the doctest list stays in sync with the tree.
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    # The file is kept sorted to make merges and reviews predictable.
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 713 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(numa: int, numb: int) -> bool:
    """Return True when the two integers have opposite signs.

    XOR of two ints is negative exactly when their sign bits differ.
    Note ``^`` binds tighter than ``<``, so this is ``(numa ^ numb) < 0``.

    >>> SCREAMING_SNAKE_CASE__(1, -1)
    True
    >>> SCREAMING_SNAKE_CASE__(1, 1)
    False
    """
    return numa ^ numb < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 48 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # `--half` means "save weights in half precision" -> cast to float16.
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 62 |
def lowerCamelCase__(lowercase: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    The sign is discarded (``abs``) before the digits are considered.

    :param lowercase: the input integer (any sign)
    :raises TypeError: if the input is not an ``int``
    :return: the maximum over all single-digit deletions

    NOTE(review): a single-digit input leaves an empty digit string and makes
    ``int("")`` raise ValueError — same behavior as the upstream algorithm.
    """
    if not isinstance(lowercase, int):
        raise TypeError("only integers accepted as input")
    else:
        num_string = str(abs(lowercase))
        # One candidate digit list per deletable position.
        num_transpositions = [list(num_string) for char in range(len(num_string))]
        for index in range(len(num_transpositions)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 62 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger via transformers' logging wrapper (not stdlib logging).
logger = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for the pretrained XLM variants.
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """Configuration for XLM models: stores vocabulary/architecture sizes, the
    language-embedding setup, special-token indices, and sequence-summary
    options. All arguments default to the `xlm-mlm-en-2048` architecture.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Construct the configuration; unknown kwargs are forwarded to
        ``PretrainedConfig``. ``n_words`` is accepted as a legacy alias for
        ``vocab_size``.
        """
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        # Backward compatibility: older configs passed `n_words` instead of
        # `vocab_size`.
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM: declares the model inputs and which
    axes are dynamic (batch/sequence, plus choice for multiple-choice tasks).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 81 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for CMStochasticIterativeScheduler (consistency models)."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        # Small default config for the tests; callers may override any key.
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        # Stepping at two consecutive timesteps must preserve the sample shape.
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Regression values for this seed/config.
        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Explicit custom timesteps instead of num_inference_steps.
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Not strictly decreasing -> set_timesteps must reject it.
        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        # NOTE(review): msg looks like it was meant to be an f-string upstream;
        # kept byte-identical since it is only an assertion message.
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 81 | 1 |
'''simple docstring'''
import numpy
# List of (input features, expected output) pairs for training/testing.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias term; the rest pair with the 3 features.
parameter_vector = [2, 4, 1, 5]
m = len(train_data)  # number of training examples
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between predicted and actual output for one example.

    :param example_no: index of the example within the chosen data set
    :param data_set: "train" or "test"
    :return: hypothesis(example) - actual_output(example)
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    """Linear hypothesis h(x) = theta0 + theta1*x1 + ... for one input tuple.

    ``parameter_vector[0]`` (module global) is the bias; the remaining
    entries pair with the input features.
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output value for the example from the chosen data set.

    :param example_no: index of the example
    :param data_set: "train" or "test"
    :return: the recorded output, or None for an unknown data-set name
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the example from the chosen data set.

    :param example_no: index of the example
    :param data_set: "train" or "test"
    :return: h(x) for the example's features, or None for an unknown name
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum over examples of the cost-derivative term for one parameter.

    :param index: feature index; -1 selects the bias term
    :param end: number of training examples to sum over (defaults to m)
    :return: the (unaveraged) summation value
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            # Bias term: derivative is just the error.
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Cost derivative w.r.t. the parameter at ``index`` (-1 = bias),
    averaged over the m training examples.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Run batch gradient descent, updating the global ``parameter_vector``
    until consecutive iterates agree within the tolerances below.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 maps index 0 to the bias term (-1).
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. hypothesis outputs on the held-out test data."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 365 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
# Seed everything so the conversion sanity checks below are reproducible.
set_seed(7_7_0)
# Mapping from the original suno/bark (GPT-style) layer names to the names
# used by the Hugging Face Bark implementation.
UpperCAmelCase_ : List[str] = {
    'c_attn': 'att_proj',
    'c_proj': 'out_proj',
    'c_fc': 'in_proj',
    'transformer.': '',
    'h.': 'layers.',
    'ln_1': 'layernorm_1',
    'ln_2': 'layernorm_2',
    'ln_f': 'layernorm_final',
    'wpe': 'position_embeds_layer',
    'wte': 'input_embeds_layer',
}
# Hub locations of the original checkpoints, keyed by model type, optionally
# suffixed with "_small" for the reduced variants.
UpperCAmelCase_ : List[Any] = {
    'text_small': {
        'repo_id': 'suno/bark',
        'file_name': 'text.pt',
    },
    'coarse_small': {
        'repo_id': 'suno/bark',
        'file_name': 'coarse.pt',
    },
    'fine_small': {
        'repo_id': 'suno/bark',
        'file_name': 'fine.pt',
    },
    'text': {
        'repo_id': 'suno/bark',
        'file_name': 'text_2.pt',
    },
    'coarse': {
        'repo_id': 'suno/bark',
        'file_name': 'coarse_2.pt',
    },
    'fine': {
        'repo_id': 'suno/bark',
        'file_name': 'fine_2.pt',
    },
}
# Local cache layout: checkpoints live under $XDG_CACHE_HOME (falling back to
# ~/.cache) in the suno/bark_v0 subdirectory.
UpperCAmelCase_ : Optional[Any] = os.path.dirname(os.path.abspath(__file__))
UpperCAmelCase_ : Optional[Any] = os.path.join(os.path.expanduser('~'), '.cache')
UpperCAmelCase_ : Optional[int] = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _lowercase ( model_type, use_small=False ):
    """Return the local filesystem path of a cached Bark checkpoint.

    :param model_type: one of "text", "coarse" or "fine"
    :param use_small: if True, resolve the "<type>_small" checkpoint instead
    :return: path of the checkpoint file inside ``CACHE_DIR``
    """
    # Fix: the original signature repeated one parameter name (a SyntaxError)
    # and the body mutated `key` without ever defining it.
    key = model_type
    if use_small:
        key += "_small"
    # Checkpoints are cached under CACHE_DIR using the upstream file name.
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'] )
def _lowercase ( repo_id, file_name ):
    """Download *file_name* from the given Hub repository into ``CACHE_DIR``.

    :param repo_id: Hugging Face Hub repository id (e.g. "suno/bark")
    :param file_name: name of the checkpoint file inside the repository
    """
    # Fix: the original signature repeated one parameter name (a SyntaxError).
    os.makedirs(CACHE_DIR, exist_ok=True )
    hf_hub_download(repo_id=repo_id, filename=file_name, local_dir=CACHE_DIR )
def _lowercase ( ckpt_path, device, use_small=False, model_type="text" ):
    """Load an original suno/bark checkpoint and convert it into a HF sub-model.

    :param ckpt_path: local path of the original checkpoint file
    :param device: map location for ``torch.load`` and target device for the model
    :param use_small: whether this is the reduced ("_small") variant
    :param model_type: "text", "coarse" or "fine"
    :return: the converted model, in eval mode, moved to ``device``
    :raises NotImplementedError: for an unknown ``model_type``
    :raises ValueError: if the converted state dict has unexpected/missing keys
    """
    # NOTE: the original definition repeated one obfuscated parameter name four
    # times (a SyntaxError); the parameters are restored from the call site.
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info['repo_id'], model_info['file_name'] )
    checkpoint = torch.load(ckpt_path, map_location=device )
    # this is a hack: old checkpoints store a single `vocab_size`; split it
    # into the input/output vocab sizes the HF configs expect.
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint: strip the torch.compile wrapper prefix and rename
    # layers according to the mapping declared at module level.
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    # strict=False because the `.attn.bias` buffers are intentionally excluded.
    model.load_state_dict(state_dict, strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"""model loaded: {round(n_params/1E6, 1 )}M params, {round(val_loss, 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def _lowercase ( pytorch_dump_folder_path, use_small=False, model_type="text" ):
    """Convert one Bark sub-model, verify it against the original, and save it.

    :param pytorch_dump_folder_path: output directory for ``save_pretrained``
    :param use_small: convert the reduced ("_small") checkpoint variant
    :param model_type: "text", "coarse" or "fine"
    :raises NotImplementedError: for an unknown ``model_type``
    :raises ValueError: if parameter counts or sample outputs disagree
    """
    # NOTE: the original definition repeated one obfuscated parameter name
    # (a SyntaxError); the parameters are restored from the __main__ call site.
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small )
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model on a random sample batch
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        # fine model: predictions are per codebook channel over n_codes_total codes
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel, vec )
        output_old_model = bark_model(prediction_codebook_channel, vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def _lowercase ( semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
    """Assemble the three converted sub-models plus Encodec into one BarkModel.

    :param semantic_path: directory holding the converted semantic (text) model
    :param coarse_path: directory holding the converted coarse acoustics model
    :param fine_path: directory holding the converted fine acoustics model
    :param append_text: subdirectory name appended to ``folder_path`` for output
    :param hub_path: Hub repo id used when pushing the assembled model
    :param folder_path: parent directory for the assembled checkpoint
    """
    # NOTE: the original definition repeated one obfuscated parameter name six
    # times (a SyntaxError); names are restored from the body's usage order.
    pytorch_dump_folder_path = os.path.join(folder_path, append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    # Wire the converted sub-models and the shared generation config in.
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True )
if __name__ == "__main__":
    # CLI entry point: convert one sub-model checkpoint into HF format.
    # NOTE(review): `parser` and `args` are read below although the
    # assignments target `UpperCAmelCase_` — obfuscation damage; verify.
    UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('model_type', type=str, help='text, coarse or fine.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    UpperCAmelCase_ : Any = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 365 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase__ , int(b / 2 ) ) * actual_power(lowerCamelCase__ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase__ , int(b / 2 ) ) * actual_power(lowerCamelCase__ , int(b / 2 ) )
def _lowercase ( a , b ) -> float:
    """Compute ``a ** b`` for any integer exponent, including negative ones.

    :param a: base
    :param b: exponent (may be negative)
    :return: ``a ** b``; for ``b < 0`` the reciprocal of the positive power
    """
    # Fix: the original signature repeated one parameter name (a SyntaxError);
    # the names are restored from the body's own references.
    if b < 0:
        # a**(-n) == 1 / a**n; the helper computes the magnitude.
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
    # Demo: (-2) ** (-3) == -0.125
    print(power(-2, -3))
| 10 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : str = logging.get_logger(__name__)
# SentencePiece uses U+2581 (lower one-eighth block) to mark word boundaries.
_a : Tuple = "▁"
_a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
# Hub locations of the SentencePiece models for each pretrained checkpoint.
_a : Tuple = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
# Maximum input lengths (in tokens) for each pretrained checkpoint.
_a : Optional[Any] = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (__magic_name__ ):
    """XLM-RoBERTa tokenizer backed by a SentencePiece BPE model.

    Token ids from SentencePiece are shifted by ``fairseq_offset`` (1) so the
    first four ids follow the fairseq layout: <s>=0, <pad>=1, </s>=2, <unk>=3.

    NOTE(review): ``__init__`` declares several parameters with the same
    obfuscated name (a SyntaxError); the originals were presumably
    vocab_file, bos/eos/sep/cls/unk/pad/mask tokens and sp_model_kwargs.
    """

    snake_case :Union[str, Any] = VOCAB_FILES_NAMES
    snake_case :Any = PRETRAINED_VOCAB_FILES_MAP
    snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case :Optional[int] = ["input_ids", "attention_mask"]

    def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        """Load the SentencePiece model and set up the fairseq id alignment."""
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
        __UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
        __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCamelCase_ ) )
        __UpperCAmelCase : Union[str, Any] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        __UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __UpperCAmelCase : List[Any] = 1
        __UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
        __UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self ):
        """Drop the live SentencePiece processor (not picklable); keep its proto."""
        __UpperCAmelCase : List[str] = self.__dict__.copy()
        __UpperCAmelCase : str = None
        __UpperCAmelCase : str = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , UpperCamelCase_ ):
        """Rebuild the SentencePiece processor from the serialized proto."""
        __UpperCAmelCase : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __UpperCAmelCase : Tuple = {}
        __UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        """Build <s> A </s> (</s> B </s>) with special tokens added."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __UpperCAmelCase : List[Any] = [self.cls_token_id]
        __UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase_ )) + [1]
        return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        """Token type ids: XLM-R does not use them, so everything is 0."""
        __UpperCAmelCase : Dict = [self.sep_token_id]
        __UpperCAmelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _snake_case ( self ):
        """Vocabulary size: spm pieces + fairseq offset + the <mask> token."""
        return len(self.sp_model ) + self.fairseq_offset + 1  # Add the <mask> token

    def _snake_case ( self ):
        """Return the full token -> id mapping, including added tokens."""
        __UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _snake_case ( self , UpperCamelCase_ ):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )

    def _snake_case ( self , UpperCamelCase_ ):
        """Convert a token to its (fairseq-aligned) id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _snake_case ( self , UpperCamelCase_ ):
        """Convert a (fairseq-aligned) id back to its token string."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def _snake_case ( self , UpperCamelCase_ ):
        """Join pieces and turn the SentencePiece word marker back into spaces."""
        __UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
        return out_string

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        """Save the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(UpperCamelCase_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __UpperCAmelCase : List[str] = os.path.join(
            UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase_ , "wb" ) as fi:
                __UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase_ )
        return (out_vocab_file,)
| 10 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# Hub location of the pretrained LayoutLMv3 configuration file.
SCREAMING_SNAKE_CASE : Optional[int] = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class __lowerCamelCase ( __UpperCAmelCase ):
    """Configuration for LayoutLMv3: text, 1D/2D layout and patch-image embeddings."""

    __UpperCamelCase = 'layoutlmv3'

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store LayoutLMv3 hyper-parameters; text-model arguments are
        forwarded to the base configuration class.

        NOTE(review): the original definition reused one obfuscated name for
        every parameter (a SyntaxError); names are restored from the
        assignment targets in the body — verify against the upstream config.
        """
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # 2D layout embeddings ("ad" follows this file's spelling of "2d").
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        # Relative attention biases (1D sequence and 2D spatial).
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        # Modality switches and vision-branch geometry.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class __lowerCamelCase ( __UpperCAmelCase ):
    """ONNX export configuration for LayoutLMv3.

    NOTE(review): all four methods share the obfuscated name ``A__`` (later
    definitions shadow earlier ones), the last method's signature repeats one
    parameter name (a SyntaxError), and its body reads ``snake_case__`` names
    the signature never binds — obfuscation damage; verify against upstream.
    """

    __UpperCamelCase = version.parse('1.12' )

    @property
    def A__ (self ):
        """Declare the ONNX input tensors and their dynamic axes per task."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ] )
        else:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
                ] )

    @property
    def A__ (self ):
        """Absolute tolerance used when validating exported outputs."""
        return 1e-5

    @property
    def A__ (self ):
        """Default ONNX opset version for the export."""
        return 12

    def A__ (self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = 3 , lowerCamelCase = 40 , lowerCamelCase = 40 , ):
        """Generate dummy (text, boxes, image) inputs sized for the export."""
        setattr(processor.image_processor , """apply_ocr""" , snake_case__ )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowerCAmelCase = compute_effective_axis_dimension(
            snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowerCAmelCase = processor.tokenizer.num_special_tokens_to_add(snake_case__ )
        _lowerCAmelCase = compute_effective_axis_dimension(
            snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
        # Generate dummy inputs according to compute batch and sequence
        _lowerCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        _lowerCAmelCase = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        _lowerCAmelCase = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        _lowerCAmelCase = dict(
            processor(
                snake_case__ , text=snake_case__ , boxes=snake_case__ , return_tensors=snake_case__ , ) )
        return inputs
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCamelCase_ = logging.get_logger(__name__)
# The Jukebox tokenizer persists three separate vocabularies.
UpperCamelCase_ = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}
# Hub locations of those vocabulary files for the pretrained checkpoint.
UpperCamelCase_ = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}
# Maximum number of lyric tokens kept per input.
UpperCamelCase_ = {
    "jukebox": 5_1_2,
}
class a ( __UpperCAmelCase ):
    """Jukebox tokenizer: encodes (artist, genres, lyrics) triples.

    Artists and genres map through JSON vocabularies; lyrics are encoded
    character-by-character with a version-dependent character set.

    NOTE(review): several method signatures below repeat one obfuscated
    parameter name (a SyntaxError in Python) while their bodies read the
    original names — obfuscation damage; verify against the upstream file.
    """

    lowercase_ : Optional[Any] = VOCAB_FILES_NAMES
    lowercase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : List[str] = PRETRAINED_LYRIC_TOKENS_SIZES
    lowercase_ : Tuple = ['input_ids', 'attention_mask']

    def __init__( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : int , snake_case__ : Dict=["v3", "v2", "v2"] , snake_case__ : List[str]=512 , snake_case__ : List[str]=5 , snake_case__ : Optional[Any]="<|endoftext|>" , **snake_case__ : int , ):
        """Load the artist/genre/lyrics vocabularies and build the decoders."""
        __lowerCAmelCase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
        super().__init__(
            unk_token=snake_case__ , n_genres=snake_case__ , version=snake_case__ , max_n_lyric_tokens=snake_case__ , **snake_case__ , )
        __lowerCAmelCase = version
        __lowerCAmelCase = max_n_lyric_tokens
        __lowerCAmelCase = n_genres
        with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
            __lowerCAmelCase = json.load(snake_case__ )
        with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
            __lowerCAmelCase = json.load(snake_case__ )
        with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
            __lowerCAmelCase = json.load(snake_case__ )
        # Characters OUTSIDE this pattern are treated as out-of-vocabulary.
        __lowerCAmelCase = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            __lowerCAmelCase = oov.replace(R"\-'" , R"\-+'" )
        __lowerCAmelCase = regex.compile(snake_case__ )
        __lowerCAmelCase = {v: k for k, v in self.artists_encoder.items()}
        __lowerCAmelCase = {v: k for k, v in self.genres_encoder.items()}
        __lowerCAmelCase = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def UpperCAmelCase__ ( self : Any ):
        """Total vocabulary size across the three encoders."""
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )

    def UpperCAmelCase__ ( self : int ):
        """Return the combined vocabulary.

        NOTE(review): ``dict()`` does not accept three positional mappings —
        this call raises TypeError as written; presumably the original built a
        dict keyed by vocabulary name — verify.
        """
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )

    def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Optional[int] ):
        """Map artist names, genre lists and lyric characters to their ids."""
        __lowerCAmelCase = [self.artists_encoder.get(snake_case__ , 0 ) for artist in list_artists]
        for genres in range(len(snake_case__ ) ):
            __lowerCAmelCase = [self.genres_encoder.get(snake_case__ , 0 ) for genre in list_genres[genres]]
            # Pad each genre list with -1 up to n_genres entries.
            __lowerCAmelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        __lowerCAmelCase = [[self.lyrics_encoder.get(snake_case__ , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def UpperCAmelCase__ ( self : str , snake_case__ : List[str] ):
        """Lyrics are tokenized character-by-character."""
        return list(snake_case__ )

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] , **snake_case__ : Optional[Any] ):
        """Normalize the triple, then character-tokenize the lyrics."""
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_for_tokenization(snake_case__ , snake_case__ , snake_case__ )
        __lowerCAmelCase = self._tokenize(snake_case__ )
        return artist, genre, lyrics

    def UpperCAmelCase__ ( self : Dict , snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ):
        """Normalize artists/genres per model version and strip OOV lyric chars."""
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                __lowerCAmelCase = artists[idx].lower()
                __lowerCAmelCase = [genres[idx].lower()]
            else:
                # v2 vocabularies use normalized names suffixed with ".v2".
                __lowerCAmelCase = self._normalize(artists[idx] ) + ".v2"
                __lowerCAmelCase = [
                    self._normalize(snake_case__ ) + ".v2" for genre in genres[idx].split("_" )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            __lowerCAmelCase = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
            __lowerCAmelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            __lowerCAmelCase = {vocab[index]: index + 1 for index in range(len(snake_case__ ) )}
            __lowerCAmelCase = 0
            __lowerCAmelCase = len(snake_case__ ) + 1
            __lowerCAmelCase = self.vocab
            __lowerCAmelCase = {v: k for k, v in self.vocab.items()}
            __lowerCAmelCase = ""
        else:
            __lowerCAmelCase = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
        __lowerCAmelCase = self._run_strip_accents(snake_case__ )
        __lowerCAmelCase = lyrics.replace("\\" , "\n" )
        __lowerCAmelCase = self.out_of_vocab.sub("" , snake_case__ ), [], []
        return artists, genres, lyrics

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Optional[int] ):
        """Strip accents by dropping Unicode combining marks (category Mn)."""
        __lowerCAmelCase = unicodedata.normalize("NFD" , snake_case__ )
        __lowerCAmelCase = []
        for char in text:
            __lowerCAmelCase = unicodedata.category(snake_case__ )
            if cat == "Mn":
                continue
            output.append(snake_case__ )
        return "".join(snake_case__ )

    def UpperCAmelCase__ ( self : Tuple , snake_case__ : str ):
        """Lowercase and replace every non-[a-z0-9.] run with one underscore."""
        __lowerCAmelCase = (
            [chr(snake_case__ ) for i in range(ord("a" ) , ord("z" ) + 1 )]
            + [chr(snake_case__ ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
            + [chr(snake_case__ ) for i in range(ord("0" ) , ord("9" ) + 1 )]
            + ["."]
        )
        __lowerCAmelCase = frozenset(snake_case__ )
        __lowerCAmelCase = re.compile(R"_+" )
        __lowerCAmelCase = "".join([c if c in accepted else "_" for c in text.lower()] )
        __lowerCAmelCase = pattern.sub("_" , snake_case__ ).strip("_" )
        return text

    def UpperCAmelCase__ ( self : str , snake_case__ : List[str] ):
        """Join lyric character tokens back into a space-separated string."""
        return " ".join(snake_case__ )

    def UpperCAmelCase__ ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : bool = False ):
        """Convert *inputs* to the requested tensor framework (tf/pt/jax/np)."""
        if not isinstance(snake_case__ , snake_case__ ):
            __lowerCAmelCase = TensorType(snake_case__ )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
            import tensorflow as tf

            __lowerCAmelCase = tf.constant
            __lowerCAmelCase = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
            import torch

            __lowerCAmelCase = torch.tensor
            __lowerCAmelCase = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
            import jax.numpy as jnp  # noqa: F811

            __lowerCAmelCase = jnp.array
            __lowerCAmelCase = _is_jax
        else:
            __lowerCAmelCase = np.asarray
            __lowerCAmelCase = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                __lowerCAmelCase = [inputs]
            if not is_tensor(snake_case__ ):
                __lowerCAmelCase = as_tensor(snake_case__ )
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
        return inputs

    def __call__( self : str , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Optional[Any]="" , snake_case__ : Union[str, Any]="pt" ):
        """Tokenize one (artist, genres, lyrics) triple per model version."""
        __lowerCAmelCase = [0, 0, 0]
        __lowerCAmelCase = [artist] * len(self.version )
        __lowerCAmelCase = [genres] * len(self.version )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.tokenize(snake_case__ , snake_case__ , snake_case__ )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._convert_token_to_id(snake_case__ , snake_case__ , snake_case__ )
        __lowerCAmelCase = [-INFINITY] * len(full_tokens[-1] )
        __lowerCAmelCase = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case__ )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )

    def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
        """Write the three vocabulary JSON files into *save_directory*."""
        if not os.path.isdir(snake_case__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        __lowerCAmelCase = os.path.join(
            snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
        with open(snake_case__ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case__ ) )
        __lowerCAmelCase = os.path.join(
            snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
        with open(snake_case__ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case__ ) )
        __lowerCAmelCase = os.path.join(
            snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
        with open(snake_case__ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case__ ) )
        return (artists_file, genres_file, lyrics_file)

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Any ):
        """Decode artist/genre/lyric ids back to their string tokens."""
        __lowerCAmelCase = self.artists_decoder.get(snake_case__ )
        __lowerCAmelCase = [self.genres_decoder.get(snake_case__ ) for genre in genres_index]
        __lowerCAmelCase = [self.lyrics_decoder.get(snake_case__ ) for character in lyric_index]
        return artist, genres, lyrics
| 611 | 0 |
'''simple docstring'''
from math import sqrt
def lowercase_( SCREAMING_SNAKE_CASE_ ):
    """Return the sum of the proper divisors of *SCREAMING_SNAKE_CASE_*.

    Divisors are collected in pairs (d, n // d) for d up to sqrt(n); an exact
    square root is counted once. The number itself is excluded from the sum.
    """
    divisor_total = 0
    root = sqrt(SCREAMING_SNAKE_CASE_ )
    for candidate in range(1 , int(root + 1 ) ):
        if SCREAMING_SNAKE_CASE_ % candidate == 0 and candidate != root:
            # Count the divisor together with its complementary pair.
            divisor_total += candidate + SCREAMING_SNAKE_CASE_ // candidate
        elif candidate == root:
            # Perfect square: the root pairs with itself, count it once.
            divisor_total += candidate
    return divisor_total - SCREAMING_SNAKE_CASE_
def lowercase_(SCREAMING_SNAKE_CASE_=10000):
    """Project Euler 21: return the sum of all amicable numbers below the limit."""
    from math import isqrt

    limit = SCREAMING_SNAKE_CASE_

    def _aliquot(n):
        # Sum of proper divisors. Self-contained because the sibling helper's
        # module-level name was mangled away and is not resolvable here.
        root = isqrt(n)
        total = sum(i + n // i for i in range(1, root + 1) if n % i == 0)
        if root * root == n:
            total -= root  # square root counted twice
        return total - n

    # i is amicable iff d(d(i)) == i and d(i) != i (excludes perfect numbers).
    return sum(
        i
        for i in range(1, limit)
        if _aliquot(_aliquot(i)) == i and _aliquot(i) != i
    )
if __name__ == "__main__":
    # NOTE: the solver above is bound to the mangled name `lowercase_`, not `solution`.
    print(lowercase_(int(input().strip())))
| 713 |
def lowercase_(SCREAMING_SNAKE_CASE_):
    """Return the binary-reflected Gray code sequence for the given bit count,
    as integers.

    Raises ValueError for a negative bit count.
    """
    bit_count = SCREAMING_SNAKE_CASE_  # the original body referenced this unbound name
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # Closed form: the i-th binary-reflected Gray code is i XOR (i >> 1). This
    # yields exactly the reflected construction used by the string variant below,
    # without depending on a helper whose module-level name was mangled away.
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
def lowercase_(SCREAMING_SNAKE_CASE_):
    """Return the binary-reflected Gray code sequence for the given bit count,
    as bit-strings of that width.
    """
    bit_count = SCREAMING_SNAKE_CASE_  # the original body referenced this unbound name
    if bit_count == 0:
        return ["0"]
    # Iterative reflect-and-prefix construction: prefix "0" to the current
    # sequence and "1" to its reverse. Equivalent to the original recursion,
    # which could not resolve its own mangled global name.
    sequence = ["0", "1"]
    for _ in range(bit_count - 1):
        sequence = ["0" + code for code in sequence] + ["1" + code for code in reversed(sequence)]
    return sequence
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module when executed directly.
    doctest.testmod()
| 231 | 0 |
'''simple docstring'''
def UpperCAmelCase(lowerCamelCase_: list):
    """Return a new list with the collection's elements in ascending order.

    Stable merge sort in O(n log n). The original merged by popping from the
    front of lists (O(n) per pop, quadratic overall) and recursed through a
    global name that no longer exists.
    """

    def _merge(left: list, right: list) -> list:
        # Index-based two-way merge; no front pops.
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    def _sort(items: list) -> list:
        # Inner recursion: the mangled global name is rebound later in the
        # module, so self-recursion must not go through the global scope.
        if len(items) <= 1:
            return items
        mid = len(items) // 2
        return _merge(_sort(items[:mid]), _sort(items[mid:]))

    return _sort(lowerCamelCase_)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE: the merge sort above is bound to the mangled name `UpperCAmelCase`
    # (the original called an undefined `merge_sort`).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*UpperCAmelCase(unsorted), sep=',')
'''simple docstring'''
def UpperCAmelCase(lowerCamelCase_):
    """Pancake sort: repeatedly flip the maximum of the unsorted prefix to the
    front, then flip it into its final slot. Returns the sorted list.
    """
    arr = lowerCamelCase_  # the original body used `arr`/`cur`, which were never bound
    cur = len(arr)
    while cur > 1:
        # Index of the maximum within the still-unsorted prefix [0, cur).
        mi = arr.index(max(arr[0:cur]))
        # Flip [0..mi]: move that maximum to the front.
        arr = arr[mi::-1] + arr[mi + 1:len(arr)]
        # Flip [0..cur-1]: move the maximum to its final position.
        arr = arr[cur - 1::-1] + arr[cur:len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # NOTE: the pancake sort above is bound to the mangled name `UpperCAmelCase`
    # (the original called an undefined `pancake_sort`).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(UpperCAmelCase(unsorted))
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[int] , __magic_name__ : int ) -> list[int]:
lowercase : Optional[Any] =0
lowercase : str =len(__magic_name__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowercase : Optional[Any] =i + 1
else:
lowercase : Any =j - 1
return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE: the two-pointer search above is bound to the mangled name
    # `_lowerCAmelCase` (the original referenced an undefined `two_pointer`).
    print(f"{_lowerCAmelCase([2, 7, 11, 15], 9) = }")
| 88 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create solution object to save path.
lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ )
if solved:
print('''\n'''.join(str(__magic_name__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed directly.
    doctest.testmod()
from math import factorial

# Factorial of each decimal digit, keyed by the digit character; the functions
# below sum these over a number's digits. (The mangled constant name left
# `DIGIT_FACTORIAL` undefined where the code uses it.)
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(1_0)}
def lowerCAmelCase_(__A) -> int:
    """Return the sum of the factorials of the decimal digits of `__A`.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative.
    """
    # Fixed: the original tested isinstance(x, x), which raises TypeError for
    # any non-type argument.
    if not isinstance(__A, int):
        raise TypeError("Parameter number must be int")
    if __A < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Computed directly via factorial(); the original indexed a module lookup
    # table whose name was mangled away and is undefined here.
    return sum(factorial(int(digit)) for digit in str(__A))
def lowerCAmelCase_(chain_length=60, number_limit=1_000_000) -> int:
    """Project Euler 74: count starting numbers below `number_limit` whose
    digit-factorial chain contains exactly `chain_length` non-repeating terms.

    Fixes: the original declared both parameters with one mangled name
    (SyntaxError — real names restored from the error messages), collapsed
    every local onto a single variable, and called an undefined helper.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    def _digit_factorial_sum(number):
        # Local copy: the sibling function's module-level name was mangled away.
        return sum(factorial(int(digit)) for digit in str(number))

    # Counter for chains with exactly the desired length.
    chains_counter = 0
    # Cache of chain lengths for previously processed starting numbers.
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        chain_set = set()  # elements of the current chain
        chain_set_length = 0
        # Walk the chain until we hit a cached start, a repeated element, or
        # the chain already exceeds the target length.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = _digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE: the solver above is bound to the mangled name `lowerCAmelCase_`
    # (the original called an undefined `solution`).
    print(f"{lowerCAmelCase_()}")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule so heavy
# submodules are only imported on first attribute access. (The mangled name
# left `_import_structure` undefined at the bottom of the file, and the
# modeling list clobbered the config entry instead of being added to the map.)
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 486 | 1 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
# NOTE(review): all six module constants were collapsed onto one mangled name,
# each rebinding clobbering the previous. The names below are the ones the
# functions in this module actually reference (logger, SESSION_ID,
# DISABLE_TELEMETRY, HF_HUB_OFFLINE).
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"

# NOTE(review): `uuida` looks like a mangled `uuid4` import — confirm upstream.
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def SCREAMING_SNAKE_CASE(UpperCamelCase=None) -> str:
    """Build the HTTP user-agent string, honoring telemetry opt-outs.

    `UpperCamelCase` (mangled name; semantically a `user_agent` extension) may
    be a dict of extra key/value pairs or a plain string appended to the base.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    # Fixed: the original checked isinstance(x, x), which raises TypeError for
    # any non-type argument; the intended checks are dict and str.
    if isinstance(UpperCamelCase, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in UpperCamelCase.items())
    elif isinstance(UpperCamelCase, str):
        ua += "; " + UpperCamelCase
    return ua
def SCREAMING_SNAKE_CASE(model_id, organization=None, token=None) -> str:
    """Return "namespace/model_id", resolving the namespace from the token's
    user when no organization is given.

    Fixes: the original declared all three parameters with the same mangled
    name (SyntaxError); the `-> Dict` annotation was wrong — this returns str.
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    return f"{organization}/{model_id}"
def SCREAMING_SNAKE_CASE(args, model_name) -> Union[str, Any]:
    """Render and save a README.md model card for a training run.

    `args` is the training argparse namespace; `model_name` the repo-local
    model name. (The original declared both parameters with one mangled name —
    a SyntaxError; names restored from the attribute accesses in the body.)
    """
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    # Only the main process writes the card in distributed runs.
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    def _full_repo_name(model_id, organization=None, token=None):
        # Local copy of get_full_repo_name (its module-level name was mangled away).
        if token is None:
            token = HfFolder.get_token()
        if organization is None:
            username = whoami(token)['name']
            return f"{username}/{model_id}"
        return f"{organization}/{model_id}"

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = _full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        # Card metadata object that will be converted to a YAML block.
        card_data=ModelCardData(
            language='en',
            license='apache-2.0',
            library_name='diffusers',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        # NOTE(review): the template-path module constant's name was mangled
        # away; recomputed here from the package location — confirm upstream.
        template_path=Path(__file__).parent / 'model_card_template.md',
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def SCREAMING_SNAKE_CASE(resolved_file, commit_hash=None):
    """Extract the commit hash from a cache-resolved file path
    (".../snapshots/<hash>/..."), or pass through an explicitly given hash.

    Returns None when no valid hash can be determined. (Original duplicate
    parameter names restored from the upstream semantics.)
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    # Only accept strings that actually look like commit hashes.
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# (Both constants were bound to one mangled name, the second clobbering the
# first, while later code reads `old_diffusers_cache`.)
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def SCREAMING_SNAKE_CASE(old_cache_dir=None, new_cache_dir=None) -> None:
    """Move blob files from the pre-0.14 diffusers cache layout into the new
    cache location, leaving symlinks behind so old installs keep working.

    (Original duplicate parameter names restored; defaults resolved from the
    body's `DIFFUSERS_CACHE` / old-cache usage.)
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        # Old default cache location (diffusers < 0.14); recomputed inline
        # because the module constant's name was mangled away.
        old_cache_dir = os.path.join(
            os.path.expanduser(
                os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
            ),
            'diffusers',
        )
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                # Keep the old path usable by linking it to the moved blob.
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time migration of the pre-0.14 cache layout, run at import time.
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    # Old default cache location; recomputed inline because the module
    # constant's name was mangled away.
    _old_diffusers_cache = os.path.join(
        os.path.expanduser(
            os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
        ),
        "diffusers",
    )
    old_cache_is_not_empty = os.path.isdir(_old_diffusers_cache) and len(os.listdir(_old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            # NOTE: `move_cache` was mangled; at this point in module execution
            # that function is bound to `SCREAMING_SNAKE_CASE` (defined above).
            SCREAMING_SNAKE_CASE()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
            "the directory exists and can be written to."
        )
def SCREAMING_SNAKE_CASE(weights_name, variant=None) -> str:
    """Insert `variant` before the file extension of `weights_name`
    ("model.bin" + "fp16" -> "model.fp16.bin"); no-op when variant is None.

    (Original duplicate parameter names restored from the upstream semantics.)
    """
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
def SCREAMING_SNAKE_CASE(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve `weights_name` for a local file/directory, or download it from
    the Hub, handling the deprecated branch-as-variant loading scheme.

    Fixes: the original signature repeated one mangled parameter name twelve
    times (a SyntaxError); names restored from the f-strings in the body.
    """

    def _add_variant(name, variant=None):
        # Local copy of the variant helper (its module-level name was mangled away).
        if variant is not None:
            parts = name.split('.')
            parts = parts[:-1] + [variant] + parts[-1:]
            name = '.'.join(parts)
        return name

    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            return os.path.join(pretrained_model_name_or_path, weights_name)
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            return os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
                'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
                'login`.')
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                'this model name. Check the model page at '
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.")
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.")
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}")
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                ' \nCheckout your internet connection or see how to run the library in'
                ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.')
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}")
| 471 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once at import time: timestamped INFO-level messages.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# NOTE(review): this is the module logger; its name was mangled to `lowerCAmelCase__`.
lowerCAmelCase__ = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE(out, labels):
    """Count how many rows of logits `out` argmax to the matching entry of
    `labels`.

    Fixes: the original declared both parameters with one mangled name
    (SyntaxError) and compared two unbound locals.
    """
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def SCREAMING_SNAKE_CASE(UpperCamelCase):
    """Load the ROCStories CSV at the given path.

    Returns a list of tuples (story, continuation_1, continuation_2, label)
    where the label is shifted to be 0-based. (The original body appended to an
    unbound `output` and never used the csv reader it created.)
    """
    dataset_path = UpperCamelCase  # readable alias; keeps the (mangled) public parameter name
    output = []
    with open(dataset_path, encoding='utf_8') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header line
        for line in tqdm(reader):
            # Columns 1-4 are the story sentences; 5 and 6 the candidate
            # endings; the last column is the 1-based correct-ending label.
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def SCREAMING_SNAKE_CASE(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack ROCStories examples into fixed-size tensors for the double-heads model.

    For each example both candidate endings are encoded as
    [start] story [delimiter] continuation [clf]; returns, per dataset, a tuple
    (input_ids, mc_token_ids, lm_labels, mc_labels) of torch tensors.

    Fixes: the original declared six parameters under one mangled name
    (SyntaxError), collapsed all array targets onto one variable, and used the
    nonexistent dtype `np.intaa` (restored to int64).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 marks positions ignored by the language-modeling loss.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, :len(with_cont1)] = with_cont1
            input_ids[i, 1, :len(with_cont2)] = with_cont2
            # Index of the [clf] token, whose hidden state feeds the MC head.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, :len(with_cont1)] = with_cont1
            lm_labels[i, 1, :len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def SCREAMING_SNAKE_CASE():
    """Fine-tune and/or evaluate an OpenAI GPT double-heads model on ROCStories.

    Fixes: every local in the original was collapsed onto one mangled name,
    making the flow non-functional; distinct names restored from the visible
    right-hand sides. The sibling helpers' module-level names were also
    mangled away, so local copies are nested below.
    """

    def _load_rocstories_dataset(dataset_path):
        # Local copy of the ROCStories loader.
        output = []
        with open(dataset_path, encoding='utf_8') as f:
            reader = csv.reader(f)
            next(reader)  # skip the header line
            for line in tqdm(reader):
                output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
        return output

    def _pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
        # Local copy of the tensor-packing helper.
        tensor_datasets = []
        for dataset in encoded_datasets:
            n_batch = len(dataset)
            input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
            mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
            lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
            mc_labels = np.zeros((n_batch,), dtype=np.int64)
            for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
                with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
                with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
                input_ids[i, 0, :len(with_cont1)] = with_cont1
                input_ids[i, 1, :len(with_cont2)] = with_cont2
                mc_token_ids[i, 0] = len(with_cont1) - 1
                mc_token_ids[i, 1] = len(with_cont2) - 1
                lm_labels[i, 0, :len(with_cont1)] = with_cont1
                lm_labels[i, 1, :len(with_cont2)] = with_cont2
                mc_labels[i] = mc_label
            all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
            tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
        return tensor_datasets

    def _accuracy(out, labels):
        # Local copy of the argmax accuracy helper.
        return np.sum(np.argmax(out, axis=1) == labels)

    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps', default=-1, type=int,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed everything for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model. The new special tokens get embeddings that are
    # fine-tuned on the RocStories dataset.
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = _load_rocstories_dataset(args.train_dataset)
    eval_dataset = _load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = _pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = _accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
    # NOTE: `main` was mangled; at this point the entry point defined directly
    # above is bound to `SCREAMING_SNAKE_CASE`.
    SCREAMING_SNAKE_CASE()
| 471 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase :
    '''ModelTester fixture for ViT-Hybrid: stores tiny hyper-parameters, builds a
    small ``ViTHybridConfig`` plus random pixel inputs, and runs shape checks for
    the base model and the image-classification head.

    NOTE(review): this file appears machine-obfuscated. ``__init__`` declares
    every parameter as ``UpperCAmelCase__`` (duplicate parameter names are a
    SyntaxError), each ``self.<attr> = <param>`` assignment was rewritten to
    ``UpperCAmelCase_ = <param>``, and all methods share one placeholder name so
    only the last binding survives. The comments below record the evident intent;
    restore the original identifiers before relying on this class.
    '''

    def __init__( self : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Dict=64 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : str=[1, 16, 4, 4] , UpperCAmelCase__ : int=None , ) ->List[str]:
        # Intended targets (in order): self.parent, batch_size, image_size,
        # patch_size, num_channels, is_training, use_labels, hidden_size,
        # num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
        # hidden_dropout_prob, attention_probs_dropout_prob,
        # type_sequence_label_size, initializer_range, scope,
        # backbone_featmap_shape.
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = patch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = scope
        UpperCAmelCase_ = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        UpperCAmelCase_ = (self.image_size // 32) ** 2
        UpperCAmelCase_ = num_patches + 1

    def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
        # Intended prepare_config_and_inputs: random pixel_values, optional
        # labels, and a fresh tiny config.
        UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ = None
        if self.use_labels:
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase_ = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
        # Intended get_config: a tiny BiT backbone config feeding a ViT-Hybrid config.
        UpperCAmelCase_ = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 16, 32],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase__ , )

    def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) ->List[str]:
        # Intended create_and_check_model: bare model output shape must be
        # (batch, seq_length, hidden_size).
        UpperCAmelCase_ = ViTHybridModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        UpperCAmelCase_ = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] ) ->str:
        # Intended create_and_check_for_image_classification: logits shape must
        # be (batch, num_labels).
        UpperCAmelCase_ = self.type_sequence_label_size
        UpperCAmelCase_ = ViTHybridForImageClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        UpperCAmelCase_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
        # Intended prepare_config_and_inputs_for_common: (config, inputs_dict).
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ = config_and_inputs
        UpperCAmelCase_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    '''Common + pipeline test-suite wiring for ViT-Hybrid models.

    NOTE(review): obfuscation damage — the two mixin bases were collapsed to
    ``UpperCamelCase_`` (presumably ModelTesterMixin and PipelineTesterMixin,
    both imported above), every class attribute is bound to the same
    placeholder ``lowerCAmelCase__`` (only the last survives), all test methods
    share one name, and locals are read back under different placeholder names
    (``model``, ``arg_names``, ``backbone_params`` …). Comments record intent.
    '''

    # Intended: all_model_classes
    lowerCAmelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    # Intended: pipeline_model_mapping
    lowerCAmelCase__ = (
        {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    # Intended: three boolean feature switches (e.g. test_pruning / resize / head-masking).
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def lowerCAmelCase__ ( self : int ) ->int:
        # Intended setUp: build the model tester and a ConfigTester for ViTHybridConfig.
        # NOTE(review): ``ViTHybridModelTester`` is not defined in this module —
        # the tester class above was renamed to ``lowerCamelCase``.
        UpperCAmelCase_ = ViTHybridModelTester(self )
        UpperCAmelCase_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase__ ( self : str ) ->List[Any]:
        # Run the shared config serialization/round-trip checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
        pass

    def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
        # Every model must expose an nn.Module input embedding and an optional
        # nn.Linear output embedding.
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCAmelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
        # The forward signature must start with ``pixel_values``.
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(UpperCAmelCase__ )
            UpperCAmelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
        # Shape check for the bare ViTHybridModel.
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
        # Shape check for the image-classification head.
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Dict ) ->str:
        # With a zero-init config, every trainable parameter outside the backbone
        # must be exactly 0.0 or 1.0.
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ = _config_zero_init(UpperCAmelCase__ )
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(config=UpperCAmelCase__ )
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    UpperCAmelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @slow
    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        # Smoke-test from_pretrained on the first reference checkpoint.
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = ViTHybridModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img( ):
    """Load the standard COCO two-cats test image used by the slow integration tests.

    Returns:
        The opened ``PIL.Image.Image``.

    Fixes two obfuscation defects: the function had been renamed to a
    placeholder (``__lowerCamelCase``) although the integration tests below call
    ``prepare_img()``, and the opened image was bound to a throwaway name while
    an undefined ``image`` variable was returned (a guaranteed NameError).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
    """Slow integration tests running real ViT-Hybrid checkpoints on the COCO cats image.

    Identifier restoration: in the obfuscated original every method shared one
    placeholder name (so ``self.default_image_processor`` was never defined) and
    all locals were collapsed; names are restored from the call sites visible in
    this file (``default_image_processor``, ``prepare_img``, ``torch_device``).
    """

    @cached_property
    def default_image_processor( self : Any ) ->Optional[Any]:
        """Image processor for the reference checkpoint (None without vision extras)."""
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self : Union[str, Any] ) ->Optional[Any]:
        """Check the logits shape and a reference logits slice for the pretrained classifier."""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    @require_accelerate
    def test_accelerate_inference( self : Optional[Any] ) ->int:
        """Smoke-test ``device_map='auto'`` loading plus a top-1 prediction on the cats image."""
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        # Fix: the original used ``assertTrue(model.config.idalabel[...], 'tabby, tabby cat')``.
        # ``idalabel`` is not a config attribute (``id2label`` is), and assertTrue's
        # second positional argument is only a failure *message*, so the check
        # could never fail. Use a real equality assertion.
        self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Shared module-level RNG so the random test fixtures are reproducible within a
# process. Fix: the obfuscation pass renamed this global to ``lowercase_`` while
# ``floats_list`` below still reads ``global_rng``; restore the expected name.
global_rng = random.Random()

if is_torch_available():
    # torch is only needed for the pt-tensor assertions; keep it optional.
    import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of uniform floats in ``[0, scale)``.

    Args:
        shape: ``(num_rows, num_cols)`` of the returned nested list.
        scale: multiplier applied to each uniform sample.
        rng: optional ``random.Random``; falls back to the module-level ``global_rng``.
        name: unused; kept for signature compatibility with sibling test helpers.

    Fixes vs. the obfuscated original: every parameter was declared under the
    same placeholder name (duplicate parameter names are a SyntaxError) while
    the body read ``shape``/``scale``/``rng``; the callers in this file use the
    name ``floats_list``, so that name is restored.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester ( unittest.TestCase ):
    """Hyper-parameter fixture for the AST feature-extraction tests.

    Restored from the obfuscated original: ``__init__`` declared every parameter
    under one duplicated placeholder (a SyntaxError) and the ``self.<attr>``
    targets were stripped. The class name follows the
    ``ASTFeatureExtractionTester(self)`` call site in the test class below.
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths so one batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """Build a batch of float speech inputs (lists, or numpy arrays if ``numpify``).

        NOTE(review): relies on a module-level ``floats_list`` helper.
        """
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            # Fix: the original converted an undefined placeholder instead of ``x``.
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """Feature-extraction tests for the Audio Spectrogram Transformer extractor.

    Identifier restoration: the obfuscated original reused one placeholder for
    the class (shadowing the tester class above), one duplicated name for all
    four methods (so only the last survived as a test), a placeholder mixin
    base (the mixin is imported above), and garbled dtypes
    (``np.floataa``/``torch.floataa``). Names below are restored from the
    visible call sites (``self.feature_extraction_class``,
    ``self.feat_extract_tester``, ``self._load_datasamples``).
    """

    # Read via ``self.feature_extraction_class(...)`` in the tests below.
    feature_extraction_class = ASTFeatureExtractor

    def setUp( self ):
        # Build the hyper-parameter fixture consumed by every test.
        self.feat_extract_tester = ASTFeatureExtractionTester(self )

    def test_call( self ):
        """Lists, numpy rows and 2-D numpy batches must all encode identically."""
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_list = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        encoded_np = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(encoded_list , encoded_np , atol=1e-3 ) )
        # Test batched
        batch_list = feat_extract(speech_inputs , padding=True , return_tensors='''np''' ).input_values
        batch_np = feat_extract(np_speech_inputs , padding=True , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_b in zip(batch_list , batch_np ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        batch_list = feat_extract(speech_inputs , return_tensors='''np''' ).input_values
        batch_np = feat_extract(np_speech_inputs , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_b in zip(batch_list , batch_np ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )

    @require_torch
    def test_double_precision_pad( self ):
        """float64 input must be down-cast to float32 by ``pad`` for np and pt."""
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # Fix: the garbled ``np.floataa`` collapsed float64 (input) and float32
        # (expected output dtype) into one token; restore both.
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )

    def _load_datasamples( self , num_samples ):
        """Load ``num_samples`` decoded LibriSpeech waveforms (used by test_integration)."""
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration( self ):
        """Compare the first 30 extracted values against a reference tensor."""
        # fmt: off
        expected_input_values = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='''pt''' ).input_values
        # Fix: ``assertEquals`` is the deprecated alias of ``assertEqual``.
        self.assertEqual(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , expected_input_values , atol=1e-4 ) )
# (removed dataset concatenation artifact)
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Tiny SentencePiece fixture used by the tokenizer unit tests (upstream name: SAMPLE_VOCAB).
UpperCamelCase_ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Language-code token ids in the PLBart "base" vocabulary. Names restored: the
# integration class below reads ``EN_CODE`` / ``PYTHON_CODE`` (both undefined in
# the obfuscated original), and the fairseq-id assertions in this file pin
# __en_XX__ == 50003 and __python__ == 50002.
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
    """Unit tests for PLBartTokenizer in its "base" and "multi" language-code modes.

    NOTE(review): obfuscation damage throughout — the mixin base
    ``__lowerCamelCase`` is undefined (presumably TokenizerTesterMixin, imported
    above), class attributes all rebind ``UpperCamelCase__`` (intended:
    tokenizer_class, rust_tokenizer_class, test_rust_tokenizer), every local is
    written to ``a__`` but then read back as ``lowercase_`` (the assertions below
    therefore never see the value just computed), and the tokenizer constructor
    receives ``lowercase_`` where a vocab-file path was intended. Comments record
    the evident intent of each check.
    """

    # Intended (in order): tokenizer_class, rust_tokenizer_class, test_rust_tokenizer.
    UpperCamelCase__ = PLBartTokenizer
    UpperCamelCase__ = None
    UpperCamelCase__ = False

    def lowerCAmelCase_ ( self : int ):
        # Intended setUp: build a "base" tokenizer from the SentencePiece fixture
        # and save it to the temp dir for reload-based tests.
        super().setUp()
        # We have a SentencePiece fixture for testing
        a__ = PLBartTokenizer(lowercase_ ,language_codes="base" ,keep_accents=lowercase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase_ ( self : str ):
        # "base" mode: check tokenization, id round-trips, the 4 trailing special
        # tokens (__java__, __python__, __en_XX__, <mask>) and decode() output.
        a__ = PLBartTokenizer(lowercase_ ,language_codes="base" ,keep_accents=lowercase_ )
        a__ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowercase_ ,["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase_ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
        a__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            lowercase_ ,[
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] ,)
        a__ = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(
            lowercase_ ,[
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] ,)
        a__ = tokenizer.convert_ids_to_tokens(lowercase_ )
        # Round-tripping through ids maps out-of-vocab pieces ("9", "é") to <unk>.
        self.assertListEqual(
            lowercase_ ,[
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] ,)
        a__ = tokenizer.vocab_size
        a__ = [tokenizer.convert_ids_to_tokens(lowercase_ ) for x in range(end - 4 ,lowercase_ )]
        self.assertListEqual(lowercase_ ,["__java__", "__python__", "__en_XX__", "<mask>"] )
        a__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        a__ = tokenizer(lowercase_ ).input_ids
        self.assertEqual(
            tokenizer.decode(lowercase_ ,skip_special_tokens=lowercase_ ,clean_up_tokenization_spaces=lowercase_ ) ,lowercase_ ,)

    def lowerCAmelCase_ ( self : str ):
        # "multi" mode: same checks, but the vocabulary ends with 7 language codes.
        a__ = PLBartTokenizer(lowercase_ ,language_codes="multi" ,keep_accents=lowercase_ )
        a__ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowercase_ ,["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase_ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
        a__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            lowercase_ ,[
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] ,)
        a__ = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(
            lowercase_ ,[
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] ,)
        a__ = tokenizer.convert_ids_to_tokens(lowercase_ )
        self.assertListEqual(
            lowercase_ ,[
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] ,)
        a__ = tokenizer.vocab_size
        a__ = [tokenizer.convert_ids_to_tokens(lowercase_ ) for x in range(end - 7 ,lowercase_ )]
        self.assertListEqual(
            lowercase_ ,["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        a__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        a__ = tokenizer(lowercase_ ).input_ids
        self.assertEqual(
            tokenizer.decode(lowercase_ ,skip_special_tokens=lowercase_ ,clean_up_tokenization_spaces=lowercase_ ) ,lowercase_ ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Integration tests for the ``uclanlp/plbart-python-en_XX`` checkpoint.

    NOTE(review): obfuscation damage — the class attributes below all rebind
    ``UpperCamelCase__`` (intended, in order: checkpoint_name, src_text,
    tgt_text, expected_src_tokens), only the last binding survives; method
    locals are written to ``a__`` but read back as ``lowercase_`` or the
    original names (``batch``, ``targets``, ``ids``, ``generated_ids``); and
    ``lowerCAmelCase_`` is reused for several methods including what is
    evidently ``setUpClass``. Comments record each check's intent.
    """

    # Intended: checkpoint_name.
    UpperCamelCase__ = '''uclanlp/plbart-python-en_XX'''
    # Intended: src_text (Python snippets).
    UpperCamelCase__ = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    # Intended: tgt_text (English docstrings).
    UpperCamelCase__ = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    # Intended: expected_src_tokens — ids for src_text[0] ending with EOS (2)
    # and the __python__ language code.
    UpperCamelCase__ = [
        1_34,
        54_52,
        3_34_60,
        3_34_41,
        3_34_63,
        3_34_65,
        3_34_63,
        3_34_49,
        9_88,
        20,
        3_34_56,
        19,
        3_34_56,
        7_71,
        39,
        42_58,
        8_89,
        33_18,
        3_34_41,
        3_34_63,
        3_34_65,
        3_34_63,
        3_34_49,
        24_71,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCAmelCase_ ( cls : Union[str, Any] ):
        # Intended setUpClass: load the tokenizer once for the whole class
        # (src python -> tgt en_XX) and remember pad_token_id (1).
        a__ = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name ,language_codes="base" ,src_lang="python" ,tgt_lang="en_XX" )
        a__ = 1
        return cls

    def lowerCAmelCase_ ( self : Dict ):
        # The language codes sit at fixed fairseq ids after the base vocab.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] ,5_00_01 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] ,5_00_02 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] ,5_00_03 )

    def lowerCAmelCase_ ( self : List[Any] ):
        # Encoding src_text[0] must reproduce expected_src_tokens exactly.
        a__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens ,lowercase_ )

    def lowerCAmelCase_ ( self : int ):
        # Decoding with skip_special_tokens must drop the language code and EOS.
        self.assertIn(lowercase_ ,self.tokenizer.all_special_ids )
        a__ = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
        a__ = self.tokenizer.decode(lowercase_ ,skip_special_tokens=lowercase_ )
        a__ = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowercase_ )
        self.assertEqual(lowercase_ ,lowercase_ )
        self.assertNotIn(self.tokenizer.eos_token ,lowercase_ )

    def lowerCAmelCase_ ( self : str ):
        # Truncation must keep EOS at position -2 and the language code last.
        a__ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0] ,lowercase_ )
        a__ = 10
        a__ = self.tokenizer(lowercase_ ,max_length=lowercase_ ,truncation=lowercase_ ).input_ids[0]
        self.assertEqual(ids[-2] ,2 )
        self.assertEqual(ids[-1] ,lowercase_ )
        self.assertEqual(len(lowercase_ ) ,lowercase_ )

    def lowerCAmelCase_ ( self : Optional[int] ):
        # <mask> and __java__ occupy the expected special-token ids.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) ,[5_00_04, 5_00_01] )

    def lowerCAmelCase_ ( self : Tuple ):
        # Saving and reloading must preserve the fairseq special-token mapping.
        a__ = tempfile.mkdtemp()
        a__ = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowercase_ )
        a__ = PLBartTokenizer.from_pretrained(lowercase_ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowercase_ )

    @require_torch
    def lowerCAmelCase_ ( self : int ):
        # Batch layout must match the reference fairseq batch.
        a__ = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowercase_ ,return_tensors="pt" )
        a__ = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] ,lowercase_ )
        self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
        self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )

    @require_torch
    def lowerCAmelCase_ ( self : str ):
        # Full seq2seq batch: shapes, EOS placement, and the prefix/suffix
        # special tokens after the call.
        a__ = self.tokenizer(
            self.src_text ,text_target=self.tgt_text ,padding=lowercase_ ,truncation=lowercase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,)
        a__ = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
        self.assertIsInstance(lowercase_ ,lowercase_ )
        self.assertEqual((2, 26) ,batch.input_ids.shape )
        self.assertEqual((2, 26) ,batch.attention_mask.shape )
        a__ = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens ,lowercase_ )
        self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens ,[] )
        self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Source and target can be truncated to different max lengths.
        a__ = self.tokenizer(self.src_text ,padding=lowercase_ ,truncation=lowercase_ ,max_length=3 ,return_tensors="pt" )
        a__ = self.tokenizer(
            text_target=self.tgt_text ,padding=lowercase_ ,truncation=lowercase_ ,max_length=10 ,return_tensors="pt" )
        a__ = targets["input_ids"]
        a__ = shift_tokens_right(lowercase_ ,self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] ,3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )

    @require_torch
    def lowerCAmelCase_ ( self : Optional[int] ):
        # Translation-input builder must append EOS + source code, and force
        # the target language code as BOS.
        a__ = self.tokenizer._build_translation_inputs(
            "A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="java" )
        self.assertEqual(
            nested_simplify(lowercase_ ) ,{
                # A, test, EOS, en_XX
                "input_ids": [[1_50, 2_42, 2, 5_00_03]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_00_01,
            } ,)
# (removed dataset concatenation artifact)
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCamelCase__ ( unittest.TestCase ):
    """Run accelerate's bundled ``test_metrics`` script on CPU, one GPU, and multi-GPU.

    NOTE(review): obfuscation damage — in the setUp-like method below every
    assignment targets ``a__`` while later lines read ``mod_file``,
    ``self.test_metrics`` and ``self.test_file_path``; restore those names
    before relying on this class.
    """

    def lowerCAmelCase_ ( self : Any ):
        # Intended setUp: locate accelerate's external-deps metrics script next
        # to the installed test_utils package and import it for in-process runs.
        a__ = inspect.getfile(accelerate.test_utils )
        a__ = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401

        a__ = test_metrics

    @require_cpu
    def lowerCAmelCase_ ( self : Tuple ):
        # CPU, single worker, run in-process via the debug launcher.
        debug_launcher(self.test_metrics.main ,num_processes=1 )

    @require_cpu
    def lowerCAmelCase_ ( self : Optional[int] ):
        # CPU with the launcher's default number of processes.
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def lowerCAmelCase_ ( self : List[Any] ):
        # Single GPU: call the script's main() directly.
        self.test_metrics.main()

    @require_multi_gpu
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # Multi-GPU: spawn one torchrun worker per visible device; OMP threads
        # are pinned to 1 to avoid CPU oversubscription across ranks.
        print(f'Found {torch.cuda.device_count()} devices.' )
        a__ = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(a__ ,env=os.environ.copy() )
# (removed dataset concatenation artifact)
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Dict = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True ):
    """Copy one pretrained timm LeViT checkpoint into the HF architecture and save it.

    Args:
        hidden_sizes: first-stage width; selects the matching timm architecture.
        name: checkpoint name such as ``levit-128S`` (a trailing ``S`` picks the
            small 128 variant).
        config: ``LevitConfig`` describing the target HF model.
        save_directory: root folder; outputs go to ``save_directory / name``.
        push_to_hub: when True, save the converted model and an image processor.

    Fixes vs. the obfuscated original: the function name is restored (it is
    called as ``convert_weight_and_push`` by the driver below), the duplicated
    placeholder parameters (a SyntaxError) get distinct names, and each local is
    its own variable again so the positional state-dict copy and the logits
    comparison operate on the right objects.
    """
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        # Select the matching pretrained timm architecture for this width.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True )
            else:
                from_model = timm.create_model('levit_128', pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True )
    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config ).eval()
    # Map weights positionally: both state dicts enumerate parameters in the
    # same order, so key i of one corresponds to key i of the other.
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys() )
    new_keys = list(our_model.state_dict().keys() )
    print(len(og_keys ), len(new_keys ) )
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    # Sanity check: both models must produce identical logits on random input.
    x = torch.randn((2, 3, 224, 224) )
    timm_logits = from_model(x )
    our_logits = our_model(x ).logits
    assert torch.allclose(timm_logits, our_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True ):
    """Build LeViT configs with ImageNet-1k label maps and convert checkpoint(s).

    Args:
        save_directory: where each converted checkpoint is written.
        model_name: one of the ``levit-*`` names below; ``None`` converts all.
        push_to_hub: forwarded to ``convert_weight_and_push``.

    Returns:
        ``(config, expected_shape)`` for the (last) converted model, where
        ``expected_shape`` is ``(1, 1000)``.

    Fixes vs. the obfuscated original: the function name is restored (the
    ``__main__`` block calls ``convert_weights_and_push``), the duplicated
    placeholder parameters (a SyntaxError) are renamed, every local gets its own
    name again, the label-map partial is bound to ``ImageNetPreTrainedConfig``
    (which the config table below references but which was previously never
    defined), and ``config`` is now defined on the single-model branch too.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    # Label files live in the shared huggingface/label-files dataset repo.
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Pre-bind the classification metadata so each table entry below only has
    # to specify architecture hyper-parameters.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: parse arguments, create the dump folder, then convert
    # either the requested checkpoint or all of them.
    # NOTE(review): obfuscation damage — the parser is bound to ``_snake_case``
    # but used as ``parser`` below, and ``args`` / ``pytorch_dump_folder_path``
    # are likewise read back under names that the placeholder overwrote.
    # Restore consistent names before running this script.
    _snake_case : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    _snake_case : List[Any] = parser.parse_args()
    _snake_case : Path = args.pytorch_dump_folder_path
    # Create the output directory (including parents) before converting.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# (removed dataset concatenation artifact)
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for raw model features and (nested) model outputs.
# (Bug fix: all three module constants were collapsed onto the single name
# `__A`, so the unit-conversion constant read later as PICO_TO_ANGSTROM was
# never defined.)
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# ProteinNet stores coordinates in picometers; PDB uses angstroms.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    (Bug fix: every field was obfuscated to ``A_ = 42`` — all collapsed onto one
    name and without type annotations — and the decorator referenced the
    undefined name ``snake_case``.  Field names are restored from the keyword
    call sites ``Protein(atom_positions=..., ...)`` elsewhere in this module.)
    """

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, C.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


# Backward-compatible alias for the obfuscated class name.
SCREAMING_SNAKE_CASE = Protein
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format string into a `Protein`.

    (Bug fix: every local was collapsed onto `_A` while later lines read the
    original names (`tags`, `groups`, `seq`, `tertiary`, ...), so nothing was
    ever bound.  Names and the per-atom indexed assignments are restored from
    those read-sites.)

    Args:
        proteinnet_str: Text with `[PRIMARY]`, `[TERTIARY]` and `[MASK]` groups.

    Returns:
        A `Protein` with backbone (N, CA, C) positions and masks filled in.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each "[TAG]" with the lines of its section.
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Unknown residue symbols fall through to `restype_num` (the 'X'
            # index) via the `.get` default below, so no sequence rewriting is
            # needed (the original in-place `seq[i] = 'X'` loop could not work
            # on an immutable string anyway).
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def __A ( _lowercase , _lowercase = 0 ):
'''simple docstring'''
_A = []
_A = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
_A = prot.parents
_A = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_A = [p for i, p in zip(_lowercase , _lowercase ) if i == chain_id]
if parents is None or len(_lowercase ) == 0:
_A = ['''N/A''']
pdb_headers.append(f"""PARENT {" ".join(_lowercase )}""" )
return pdb_headers
def add_pdb_headers(prot, pdb_str):
    """Add pdb headers to an existing PDB string; useful during multi-chain
    recycling.

    (Bug fix: duplicate `_lowercase` parameters were a SyntaxError and locals
    were collapsed onto `_A` while reads used the original names.)

    Args:
        prot: Object exposing `.remark`, `.parents`, `.parents_chain_index`.
        pdb_str: An existing PDB-format string.

    Returns:
        The PDB string with REMARK/PARENT headers inserted, re-emitting the
        appropriate PARENT line after each chain terminator (TER).
    """
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain = 42  # placeholder; re-bound in every branch below
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index, then lay them out densely by
            # chain number, filling gaps with "N/A".
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        # After each TER (except the final one before END), emit the next
        # chain's PARENT line.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot) -> str:
    """Convert a `Protein` instance to a PDB-format string.

    (Bug fix: locals were collapsed onto `_A` while reads used the original
    names; the mangled `restype_atoa`/`np.intaa` attribute names and the
    collapsed column padding in the ATOM/TER format strings are restored —
    PDB is a fixed-column format, so the trailing spaces matter.)
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r) -> str:
        # Residue index -> 3-letter PDB residue name; unknown maps to "UNK".
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot):
    """Return the standard per-residue atom mask for `prot.aatype`.

    (Bug fix: the parameter was named `_lowercase` while the body read `prot`,
    so the function always raised NameError.)
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features, result, b_factors=None, chain_index=None, remark=None, parents=None, parents_chain_index=None):
    """Assemble a `Protein` from model features and prediction outputs.

    (Bug fix: the def had seven parameters all named `_lowercase` — a
    SyntaxError.  Names restored from the keyword arguments passed to
    `Protein` in the body.)

    Args:
        features: Mapping with "aatype" and "residue_index" arrays.
        result: Mapping with "final_atom_positions" and "final_atom_mask".
        b_factors: Optional per-atom B-factors; defaults to zeros.
        chain_index/remark/parents/parents_chain_index: Passed through.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        # PDB residue numbering is 1-based.
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 484 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCamelCase(a):  # picklable for multiprocessing
    """Return the input plus one; module-level so multiprocessing can pickle it.

    (Bug fix: the body read the undefined name `i` instead of the parameter
    `a`.)
    """
    return a + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCamelCase():
    """The spark joblib backend registers correctly, and unsupported backend
    names make `map_nested` raise ValueError.

    (Bug fix: `pytest.raises(a)` read an undefined name — restored to
    ValueError — and both `map_nested` arguments were collapsed onto `a`.)
    """
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            # The unsupported backend raises before any pickling happens, so a
            # lambda is safe here.
            map_nested(lambda x: x + 1, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(lambda x: x + 1, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def __UpperCamelCase(num_proc):
    """`map_nested` adds one to every leaf for several container shapes under
    the spark backend.

    (Bug fix: every call read `map_nested(a, a, num_proc=a)` and every expected
    value was compared against the single undefined name
    `expected_map_nested_sa`; inputs/expectations are restored pairwise.)
    """

    def add_one(i):
        # NOTE(review): the original module-level mapper was obfuscated away;
        # the joblib-spark backend serializes callables with cloudpickle, which
        # supports local functions — confirm in CI.
        return i + 1

    sa = [1, 2]
    sb = {"a": 1, "b": 2}
    sc = {"a": [1, 2], "b": [3, 4]}
    sd = {"a": {"1": 1}, "b": 2}
    se = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_sa = [2, 3]
    expected_sb = {"a": 2, "b": 3}
    expected_sc = {"a": [2, 3], "b": [4, 5]}
    expected_sd = {"a": {"1": 2}, "b": 3}
    expected_se = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, sa, num_proc=num_proc) == expected_sa
        assert map_nested(add_one, sb, num_proc=num_proc) == expected_sb
        assert map_nested(add_one, sc, num_proc=num_proc) == expected_sc
        assert map_nested(add_one, sd, num_proc=num_proc) == expected_sd
        assert map_nested(add_one, se, num_proc=num_proc) == expected_se
| 360 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A_ = get_logger(__name__)

# Shared docstring for all logits-processor `__call__` implementations.
# (Bug fix: this constant was also assigned to `A_`, silently shadowing the
# logger created on the previous line.)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class SCREAMING_SNAKE_CASE_:
    """Abstract base class for all logits processors applied during generation.

    (Bug fix: `__call__` used two parameters both named `_lowerCAmelCase` — a
    SyntaxError — and the `@add_start_docstrings` decorator referenced an
    undefined name; the decorator is replaced by a plain docstring.)
    """

    def __call__(self, input_ids, scores):
        """Process a `scores` tensor; must be overridden by subclasses."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class SCREAMING_SNAKE_CASE_:
    """Abstract base class for all logits warpers applied during multinomial
    sampling.

    (Bug fix: duplicate `__call__` parameter names — a SyntaxError — and a
    decorator referencing an undefined name; replaced by a plain docstring.)
    """

    def __call__(self, input_ids, scores):
        """Warp a `scores` tensor; must be overridden by subclasses."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class SCREAMING_SNAKE_CASE_(list):
    """A list of logits processors applied in sequence to a scores tensor.

    (Bug fix: locals were collapsed onto `lowerCamelCase__` while reads used
    `function_args`/`scores`, and the base class was the undefined name
    `lowercase_` — restored to `list`, which `for processor in self`
    requires.)
    """

    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Processors taking extra kwargs must receive all of them.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
# NOTE(review): the base class `lowercase_` is an obfuscated, undefined name —
# originally the logits-warper base class defined above; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Logits warper that rescales scores by 1/temperature.

    (Bug fix: `__init__` never stored `self.temperature`, the isinstance check
    compared the argument against itself, and `__call__` discarded the scaled
    scores and returned the input unchanged.)
    """

    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        return scores / self.temperature
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-warper base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Top-p (nucleus) logits warper: keeps the smallest set of tokens whose
    cumulative probability exceeds `top_p`, masking the rest to `filter_value`.

    (Bug fix: `__init__` never stored its attributes and `__call__`'s locals
    were all collapsed onto one name.)
    """

    def __init__(self, top_p, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Scatter the kept scores back to their original vocabulary positions.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-warper base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Top-k logits warper: keeps the `top_k` highest-probability tokens and
    masks the rest to `filter_value`.

    (Bug fix: `__init__` never stored `self.top_k`/`self.filter_value` and
    `__call__`'s locals were all collapsed onto one name.)
    """

    def __init__(self, top_k, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Flatten batch-wise: offset each row's indices into the flat array.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-processor base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Forces the BOS token to be sampled at the first generation step.

    (Bug fix: `__init__` never stored `self.bos_token_id` and `__call__`'s
    locals were collapsed onto one name.)
    """

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        # Non-zero only when cur_len == 1 (the first generated position).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-processor base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Forces the EOS token when `max_length` is reached.

    (Bug fix: `__init__` never stored `self.max_length`/`self.eos_token_id`
    and `__call__`'s locals were collapsed onto one name.)
    """

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        # Non-zero only at the final position before max_length.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-processor base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Blocks EOS until a minimum sequence length has been generated.

    (Bug fix: `__init__` never stored its attributes and the isinstance checks
    compared the arguments against themselves.)
    """

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-processor base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Suppresses a list of tokens as soon as generation reaches `begin_index`.

    (Bug fix: `__init__` never stored `self.begin_suppress_tokens` /
    `self.begin_index`.)
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        # Non-zero only when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-processor base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Unconditionally suppresses a list of tokens at every step.

    (Bug fix: `__init__` never stored `self.suppress_tokens` and `__call__`
    discarded the masked scores and returned the input unchanged.)
    """

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        return scores.at[..., self.suppress_tokens].set(-float("inf"))
# NOTE(review): base `lowercase_` is an obfuscated, undefined name — originally
# the logits-processor base class; confirm and restore.
class SCREAMING_SNAKE_CASE_(lowercase_):
    """Forces specific tokens at specific generation indices.

    (Bug fix: `__init__` never stored `self.force_token_array` and the locals
    in both methods were collapsed onto one name; the mangled `jnp.intaa`
    dtype is restored to `jnp.int32`.)
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            # Replace all scores with -inf except the single forced token,
            # which gets score 0.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
# NOTE(review): this block is heavily corrupted by obfuscation: the __init__
# signature uses three parameters all named `_lowerCAmelCase` (a SyntaxError),
# every local is collapsed onto `lowerCamelCase__`, and later lines read names
# that are never bound (`generate_config`, `model_config`, `decoder_input_length`,
# `scores`, `input_ids_k`, ...). The structure matches a Whisper-style timestamp
# logits processor — restore from the original source before use; comments below
# describe the apparent intent, not working behavior.
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
    """Constrains sampling so that timestamp tokens appear in valid pairs and
    positions (Whisper-style decoding) — review: reconstructed intent; confirm."""
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        # presumably: generate_config, model_config, decoder_input_length — TODO confirm
        lowerCamelCase__ = generate_config.eos_token_id
        lowerCamelCase__ = generate_config.no_timestamps_token_id
        # Timestamp tokens appear to occupy ids strictly above no_timestamps_token_id.
        lowerCamelCase__ = generate_config.no_timestamps_token_id + 1
        lowerCamelCase__ = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(_lowerCAmelCase , "max_initial_timestamp_index" ):
            lowerCamelCase__ = generate_config.max_initial_timestamp_index
        else:
            lowerCamelCase__ = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            lowerCamelCase__ = model_config.vocab_size
    def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        # suppress <|notimestamps|> which is handled by without_timestamps
        lowerCamelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
        # Per-row rule: timestamps must come in pairs, except directly before EOS.
        def handle_pairs(_lowerCAmelCase , _lowerCAmelCase ):
            lowerCamelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , _lowerCAmelCase , _lowerCAmelCase )
            lowerCamelCase__ = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _lowerCAmelCase , )
            lowerCamelCase__ = jnp.where((cur_len - self.begin_index) < 2 , _lowerCAmelCase , _lowerCAmelCase )
            lowerCamelCase__ = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , _lowerCAmelCase , _lowerCAmelCase , )
            return jnp.where(
                _lowerCAmelCase , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _lowerCAmelCase , )
        lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
        # Optionally cap the very first timestamp to max_initial_timestamp_index.
        lowerCamelCase__ = jnp.where(cur_len == self.begin_index , _lowerCAmelCase , _lowerCAmelCase )
        lowerCamelCase__ = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _lowerCAmelCase , )
        lowerCamelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
        lowerCamelCase__ = jnp.where(
            _lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _lowerCAmelCase , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        lowerCamelCase__ = jax.nn.log_softmax(_lowerCAmelCase , axis=-1 )
        def handle_cumulative_probs(_lowerCAmelCase , _lowerCAmelCase ):
            lowerCamelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            lowerCamelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _lowerCAmelCase , )
        lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
        return scores
| 360 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run.

    (Bug fix: both parameters were named `lowerCAmelCase` — a SyntaxError —
    and the url/headers arguments were collapsed onto that one name.)

    Args:
        workflow_run_id: The GitHub Actions workflow run id.
        token: Optional token with `actions:read` permission.

    Returns:
        Dict mapping job name -> job html url; empty dict on failure.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        # The API pages 100 jobs at a time; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run.

    (Bug fix: duplicate `lowerCAmelCase` parameters — a SyntaxError — and
    collapsed url/headers arguments; the original misspelled read-site
    `worflow_run_id` is normalized to `workflow_run_id`.)

    Returns:
        Dict mapping artifact name -> archive download url; empty on failure.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        # The API pages 100 artifacts at a time; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact zip into `output_dir`.

    (Bug fix: four duplicate `lowerCAmelCase` parameters — a SyntaxError —
    and the redirect flags/URLs all collapsed onto one name: the first request
    must NOT follow redirects so the `Location` header can be read, the second
    must follow them.)
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}

    # GitHub answers with a redirect to the actual (signed) download URL.
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact zip (in `.zip` format).

    Reads `failures_line.txt` (error lines), `summary_short.txt` (failed test
    ids) and `job_name.txt` from the archive.

    (Bug fix: every local was collapsed onto `UpperCAmelCase` while later
    lines read `errors`/`failed_tests`/`job_name`/..., so nothing was bound.)

    Returns:
        A list with elements of the form [error_line, error, failed_test, job_link].

    Raises:
        ValueError: If the number of errors and of failed tests disagree.
    """
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': '):]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                failed_test = line[len('FAILED '):]
                                failed_tests.append(failed_test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            ' problem.')

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from every `.zip` artifact in `artifact_dir`.

    (Bug fix: duplicate `lowerCAmelCase` parameters — a SyntaxError — and the
    `errors`/`paths` locals were collapsed onto one name.)
    """
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error occurrence, keeping the failing tests, sorted by count.

    (Bug fix: duplicate parameters and collapsed locals; the sort lambda's
    parameter was obfuscated while its body read `item`.)

    Args:
        logs: List of [error_line, error, failed_test, job_link] entries.
        error_filter: Optional collection of error strings to exclude.

    Returns:
        Dict error -> {"count": n, "failed_tests": [(test, error_line), ...]},
        ordered by descending count.
    """
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test id like `tests/models/xxx/test_y.py::C::m`.

    (Bug fix: every assignment went to `UpperCAmelCase` while reads used
    `test`, so the function raised NameError.)

    Returns:
        The model directory name, or None for tests outside `tests/models/`.
    """
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Group error counts per model, sorted by total error count.

    (Bug fix: duplicate parameters and collapsed locals; the sort lambda's
    parameter read the undefined name `item`.)

    Args:
        logs: List of [error_line, error, failed_test, job_link] entries.
        error_filter: Optional collection of error strings to exclude.

    Returns:
        Dict model -> {"count": n_errors, "errors": {error: count}},
        ordered by descending count.
    """
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error report as a GitHub-flavored markdown table.

    (Bug fix: `header`/`sep`/`lines` were all collapsed onto `UpperCAmelCase`
    while later lines read the original names.)
    """
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        # Errors are truncated to keep the table readable.
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model report as a GitHub-flavored markdown table.

    (Bug fix: collapsed locals; the tuple unpacking of the model's most common
    error was restored from the read-sites `error`/`_count`.)
    """
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        # The first entry is the model's most frequent error.
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    # CLI entry point: download a run's artifacts, extract the failures and
    # write summary reports.  (Bug fix: every binding went to the single name
    # `SCREAMING_SNAKE_CASE_` while later lines read `parser`, `args`,
    # `job_links`, `artifacts`, `errors`, ... — nothing was ever defined.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / '):]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    sa = make_github_table(reduced_by_error)
    sa_per_model = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa_per_model)
| 373 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained I-BERT checkpoints to their hosted config files.
# (Bug fix: both the logger and this map were assigned to the single name
# `SCREAMING_SNAKE_CASE_`, so the second assignment shadowed the logger and the
# classes below "inherited" from a dict.)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large-mnli''': (
        '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for an I-BERT model (integer-quantized RoBERTa variant).

    (Bug fix: the base was the name holding the archive-map dict — restored to
    the `PretrainedConfig` imported above; all 18 `__init__` parameters shared
    the name `lowercase_`, a SyntaxError, and the body assigned locals instead
    of `self.` attributes.  Names restored from the attribute reads.)
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Quantization controls: whether to run in quantized mode, and which
        # ops (if any) to force back to full precision.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for I-BERT.

    Fixed: the dynamic-axis dict was assigned to an obfuscated name while the
    return expression read `dynamic_axis` (a NameError); the base class was
    also the archive-map dict rather than `OnnxConfig`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice tasks carry an extra `choice` axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 373 | 1 |
'''simple docstring'''


def check_bouncy(n: int) -> bool:
    """Return True if *n* is bouncy (digits neither non-decreasing nor non-increasing).

    Raises:
        ValueError: if *n* is not an integer.
    """
    # Fixed: the obfuscated code tested isinstance(n, n), which raises
    # TypeError instead of the intended ValueError.
    if not isinstance(n, int):
        raise ValueError('''check_bouncy() accepts only integer arguments''')
    str_n = str(n)
    sorted_str_n = ''''''.join(sorted(str_n))
    # Non-bouncy numbers equal either their sorted or reverse-sorted digits.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches *percent* (Project Euler 112).

    Raises:
        ValueError: if *percent* is outside (0, 100).
    """
    if not 0 < percent < 100:
        raise ValueError('''solution() only accepts values from 0 to 100''')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'''{solution(99)}''')
| 56 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of `GLPNImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Fixed: the obfuscated signature used the same name for *args and
        # **kwargs (a SyntaxError) and passed the positional args where the
        # warning category belongs; deprecations use FutureWarning.
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of *nums* (as a list, in DFS order) summing to *max_sum*."""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS helper: extend *path* with elements from nums[num_index:].

    Prunes branches whose partial sum overshoots *max_sum* or that cannot
    reach it even using every remaining element.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 582 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps submodule name -> list of public names.
# Fixed: the dict was assigned to an obfuscated name while `_LazyModule`
# below received the undefined `_import_structure`; the torch branch also
# replaced the dict instead of adding the modeling key.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 548 | 0 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


# Fixed: both module constants were assigned to the same obfuscated name,
# leaving `logger` (used by the config class below) undefined.
logger = logging.get_logger(__name__)

# Map of public DPT checkpoints to their hosted config.json URLs.
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """Configuration for the DPT (dense prediction transformer) model.

    In hybrid mode a BiT backbone config is embedded; defaults reproduce the
    original obfuscated values exactly.

    Fixed: the obfuscated signature reused one parameter name for every
    argument (a SyntaxError) and referenced undefined names for `kwargs`,
    `dict`, `PretrainedConfig` and `backbone_config` inside the body; names
    restored to match the attribute assignments.
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."""
                )

            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            # hybrid mode only supports the `project` readout.
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize this config to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output
| 719 |
"""simple docstring"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Reorder the four 8-char byte-groups of a 32-char bit string to little-endian."""
    if len(string_32) != 32:
        raise ValueError('''Input must be of length 32''')
    little_endian = b''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian ascii-hex of the low 32 bits of non-negative *i*."""
    if i < 0:
        raise ValueError('''Input must be non-negative''')
    hex_rep = format(i, '''08x''')[-8:]
    little_endian_hex = b''''''
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode('''utf-8''')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Expand *message* to an ASCII bit string padded to a multiple of 512 bits.

    Appends a 1 bit, zero-pads to 448 mod 512, then appends the original
    length as a 64-bit little-endian-by-word value (MD5 padding rule).
    """
    bit_string = b''''''
    for char in message:
        bit_string += format(char, '''08b''').encode('''utf-8''')
    start_len = format(len(bit_string), '''064b''').encode('''utf-8''')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of *bit_string* as sixteen little-endian 32-bit ints."""
    if len(bit_string) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits; *i* must be non-negative."""
    if i < 0:
        raise ValueError('''Input must be non-negative''')
    i_str = format(i, '''032b''')
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32-bit value *i* left by *shift* bits (both non-negative)."""
    if i < 0:
        raise ValueError('''Input must be non-negative''')
    if shift < 0:
        raise ValueError('''Shift must be non-negative''')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of *message* as 32 ascii-hex bytes (RFC 1321).

    Fixed: the obfuscated file gave every helper the same name and collapsed
    the four running-state variables onto one, so no call or state update
    resolved; names restored to match the references that survived.
    """
    bit_string = preprocess(message)

    # T-table: 2**32 * |sin(i+1)| truncated to integer.
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-step left-rotation amounts, four rounds of sixteen (RFC 1321).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 361 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps submodule name -> list of public names.
# Fixed: the dict was assigned to an obfuscated name while `_LazyModule`
# below received the undefined `_import_structure`; the torch branch also
# replaced the dict instead of adding the modeling key.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 405 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether we are running inside Google Colab; the menu class below reads
# `in_colab` to fall back to plain `input()` there.
# Fixed: the result was assigned to an obfuscated name, leaving `in_colab`
# undefined at its use sites.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class A__:
    """Interactive bullet menu.

    Renders *choices*, lets the user move a cursor with arrow/number keys
    (or type an index on Colab), and returns the selected index from `run()`.

    Fixed: the obfuscated version gave every method the same name, assigned
    instance state to throwaway locals instead of `self.*`, used duplicate
    parameter names (a SyntaxError) and referenced an undefined name inside
    the digit-key decorator comprehension.
    """

    def __init__(self, prompt: str = None, choices: list = []) -> None:
        # NOTE(review): mutable default kept for interface compatibility;
        # the list is never mutated here.
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index, end: str = ""):
        """Write choice *index*, colored green on non-Windows terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one row, marking the current position with the arrow char."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor *num_spaces* rows in *direction*, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump straight to the row whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu and block until a choice is made; return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
        if in_colab:
            forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
        else:
            forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 405 | 1 |
import os
import numpy
import onnx
def UpperCamelCase_( _A :Tuple , _A :Tuple )-> Optional[int]:
UpperCamelCase__ = a.name
UpperCamelCase__ = b.name
UpperCamelCase__ = """"""
UpperCamelCase__ = """"""
UpperCamelCase__ = a == b
UpperCamelCase__ = name_a
UpperCamelCase__ = name_b
return res
def UpperCamelCase_( _A :Dict , _A :List[str] , _A :str )-> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_A , _A )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
_graph_replace_input_with(node_proto.attribute[1].g , _A , _A )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
def UpperCamelCase_( _A :List[Any] , _A :Any , _A :Any )-> Tuple:
for n in graph_proto.node:
_node_replace_input_with(_A , _A , _A )
def UpperCamelCase_( _A :Dict , _A :List[Any] , _A :Tuple )-> str:
UpperCamelCase__ = list(model.graph.initializer )
UpperCamelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCamelCase__ = inits[i].name
UpperCamelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _A , _A )
def UpperCamelCase_( _A :Optional[int] )-> Optional[int]:
UpperCamelCase__ = os.path.dirname(_A )
UpperCamelCase__ = os.path.basename(_A )
UpperCamelCase__ = onnx.load(os.path.join(_A , _A ) )
UpperCamelCase__ = list(model.graph.initializer )
UpperCamelCase__ = set()
UpperCamelCase__ = {}
UpperCamelCase__ = []
UpperCamelCase__ = 0
for i in range(len(_A ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_A ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_A )
dup_set.add(_A )
UpperCamelCase__ = inits[j].data_type
UpperCamelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , _A )
total_reduced_size += mem_size
UpperCamelCase__ = inits[i].name
UpperCamelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_A )
else:
UpperCamelCase__ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 10_24 / 10_24 / 10_24 , "GB" )
UpperCamelCase__ = sorted(_A )
_remove_dup_initializers_from_model(_A , _A , _A )
UpperCamelCase__ = """optimized_""" + model_file_name
UpperCamelCase__ = os.path.join(_A , _A )
onnx.save(_A , _A )
return new_model
| 705 |
from typing import Any


class Node:
    """A singly linked list node holding one value."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    """Minimal singly linked list supporting push-at-head and value swap.

    Fixed: both classes shared one obfuscated name and instance state was
    assigned to throwaway locals, so `Node`, `self.head` and the method
    names called by the demo below never resolved.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print the values front-to-back on one line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """Insert *new_data* at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the *values* of the first nodes holding each datum.

        No-op when the values are equal or either is absent.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    # Demo: build 1..5 (pushed in reverse), then swap the nodes holding 1 and 4.
    # Fixed: the list was assigned to an obfuscated name while the calls below
    # used `ll`.
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
| 185 | 0 |
from __future__ import annotations
from typing import Any
class snake_case__ :
def __init__( self , lowerCAmelCase__ ) -> None:
__magic_name__ : str = num_of_nodes
__magic_name__ : list[list[int]] = []
__magic_name__ : dict[int, int] = {}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
__magic_name__ : Union[str, Any] = self.find_component(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if component_size[u_node] <= component_size[v_node]:
__magic_name__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCAmelCase__ )
elif component_size[u_node] >= component_size[v_node]:
__magic_name__ : Tuple = self.find_component(lowerCAmelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCAmelCase__ )
def __magic_name__ ( self ) -> None:
__magic_name__ : Tuple = []
__magic_name__ : str = 0
__magic_name__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__magic_name__ : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__magic_name__ ,__magic_name__ ,__magic_name__ : Union[str, Any] = edge
__magic_name__ : List[Any] = self.m_component[u]
__magic_name__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__magic_name__ : Any = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ ,__magic_name__ ,__magic_name__ : str = edge
__magic_name__ : Dict = self.m_component[u]
__magic_name__ : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
print(F'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
__magic_name__ : Tuple = [-1] * self.m_num_of_nodes
print(F'The total weight of the minimal spanning tree is: {mst_weight}' )
def UpperCamelCase():
    """Placeholder entry point; intentionally does nothing."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 324 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir: str, model_name: str):
    """Export a PyTorch BERT model's weights to a TensorFlow 1.x checkpoint.

    Args:
        model: BertModel instance whose `state_dict()` is converted.
        ckpt_dir: directory the `.ckpt` files are written to (created if needed).
        model_name: used (dashes -> underscores) as the checkpoint file stem.

    Fixed: the obfuscated signature reused one parameter name three times
    (a SyntaxError); names restored from the argparse wiring in `main`.
    """
    # Weight matrices that must be transposed for TF's kernel layout.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # Substring renames mapping PyTorch state-dict keys to TF variable names.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        # Apply every substring rewrite in order, then prefix the scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'

    def create_tf_var(tensor, name: str, session):
        # Create a zero-initialized TF variable matching the tensor's shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI wrapper: load the PyTorch model, then convert it to a TF checkpoint."""
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_name""", type=str, required=True, help="""model name e.g. bert-base-uncased""")
    parser.add_argument(
        """--cache_dir""", type=str, default=None, required=False, help="""Directory containing pytorch model"""
    )
    parser.add_argument("""--pytorch_model_path""", type=str, required=True, help="""/path/to/<pytorch-model-name>.bin""")
    parser.add_argument("""--tf_cache_dir""", type=str, required=True, help="""Directory in which to save tensorflow model""")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 324 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
A = TypeVar('T')
class __snake_case ( Generic[T]):
def __init__( self, A ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = data
lowerCamelCase : Node[T] | None = None
def __str__( self ):
"""simple docstring"""
return F'''{self.data}'''
class __snake_case ( Generic[T]):
def __init__( self ):
"""simple docstring"""
lowerCamelCase : Node[T] | None = None
def __iter__( self ):
"""simple docstring"""
lowerCamelCase : str = self.top
while node:
yield node.data
lowerCamelCase : Any = node.next
def __str__( self ):
"""simple docstring"""
return "->".join([str(A ) for item in self] )
def __len__( self ):
"""simple docstring"""
return len(tuple(iter(self ) ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return self.top is None
def UpperCAmelCase_ ( self, A ):
"""simple docstring"""
lowerCamelCase : List[Any] = Node(A )
if not self.is_empty():
lowerCamelCase : str = self.top
lowerCamelCase : List[Any] = node
def UpperCAmelCase_ ( self ):
"""simple docstring"""
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top, A )
lowerCamelCase : List[str] = self.top
lowerCamelCase : Dict = self.top.next
return pop_node.data
def UpperCAmelCase_ ( self ):
"""simple docstring"""
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 449 |
'''simple docstring'''
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if *n* uses each digit 1-9 exactly once."""
    str_n = str(n)
    return len(str_n) == 9 and set(str_n) == set('123456789')


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Candidates have the form 100002 * n for 4-digit n (i.e. n concatenated
    with 2n) or 1002003 * n for 3-digit n (n, 2n, 3n); searching downward
    makes the first pandigital hit the maximum.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 449 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Composite configs whose docstrings legitimately carry no checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    """Raise ValueError if any registered config class docstring lacks a valid
    `[name](https://huggingface.co/name)` checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 186 |
"""
Project Euler problem 77 (https://projecteuler.net/problem=77):
find the first value which can be written as the sum of primes in over
five thousand different ways.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the integers below NUM_PRIMES:
# start from 2 plus the odd numbers, then strike out composite multiples.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return one product per distinct prime partition of `number_to_partition`.

    Each way of writing the number as a sum of primes is encoded as the product
    of its prime parts, so the size of the returned set is the number of
    distinct prime partitions.

    >>> partition(5) == {5, 6}
    True
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        # The empty partition: the product of no primes is 1.
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions`
    prime partitions, or None if none exists below NUM_PRIMES."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 48 | 0 |
def lowercase(input_num):
    """Return the aliquot sum of `input_num` (the sum of its proper divisors).

    e.g. lowercase(6) == 1 + 2 + 3 == 6.

    Raises:
        ValueError: if `input_num` is not an int or is not positive.
    """
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    # Proper divisors of n are all <= n // 2, so only scan that far.
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 630 |
def method_a(boundary, steps):
    """Approximate the integral of `f` over `boundary` = [a, b] using the
    trapezoidal rule with `steps` subintervals of width h = (b - a) / steps.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    # Half-weight contributions from both endpoints, full weight inside.
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ... strictly below b - h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
| 630 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    """Builds tiny TransfoXL configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        # Make TF weight init / sampling deterministic across the checks.
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_2, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shape and attribute tests for the TF TransfoXL model family."""

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Only the sequence-classification head keeps real output embeddings/bias.
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation against a pinned reference output."""

    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 )  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>

        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>

        # Greedy decoding so the output is deterministic and comparable.
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 81 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    """Deprecated alias of `DeiTImageProcessor`.

    Kept for backward compatibility; emits a FutureWarning on instantiation and
    otherwise behaves exactly like `DeiTImageProcessor`.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 81 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from canonical checkpoint name to the URL of its hosted config file.
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    """Configuration class for XLM-ProphetNet encoder-decoder models.

    Stores the hyper-parameters of the model architecture; defaults reproduce
    the `microsoft/xprophetnet-large-wiki100-cased` architecture.
    """

    model_type = "xlm-prophetnet"
    # `past_key_values` is cache state, not a model output to compare at inference.
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total depth is the sum of encoder and decoder stacks.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
| 704 |
def lowerCAmelCase_(principal, rate_per_annum, years_to_repay):
    """Return the equal monthly installment (EMI) for a loan.

    Args:
        principal: amount borrowed; must be > 0.
        rate_per_annum: yearly interest rate as a fraction (e.g. 0.12); must be >= 0.
        years_to_repay: repayment period in whole years; must be an int > 0.

    Raises:
        Exception: if any argument fails the validation above.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 221 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds tiny ESM configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/config tests for the TF ESM model family."""

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing model outputs against pinned reference slices."""

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator factory: append a single key code to the function's `handle_key`
    list so the `KeyHandler` metaclass registers the function for that key."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Decorator factory: append several key codes to the function's `handle_key`
    list so the `KeyHandler` metaclass registers the function for all of them."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects methods marked with `mark`/`mark_multiple` into a
    per-class `key_handler` dict (key code -> handler) and attaches a
    `handle_input` dispatcher to the class."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        # Register every attribute that was tagged with a `handle_key` list.
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # Remember the last key that triggered a handler.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def _snake_case(cls):
    """Rebuild `cls` under the KeyHandler metaclass so its marked methods get registered."""
    name, bases, namespace = cls.__name__, cls.__bases__, cls.__dict__.copy()
    return KeyHandler(name, bases, namespace)
| 10 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self , snake_case__ , snake_case__=12 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=0 , snake_case__=None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Any = seq_length
_SCREAMING_SNAKE_CASE : Dict = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_input_mask
_SCREAMING_SNAKE_CASE : Any = use_labels
_SCREAMING_SNAKE_CASE : str = vocab_size
_SCREAMING_SNAKE_CASE : Any = hidden_size
_SCREAMING_SNAKE_CASE : List[Any] = projection_dim
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : Any = num_attention_heads
_SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = dropout
_SCREAMING_SNAKE_CASE : Any = attention_dropout
_SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
_SCREAMING_SNAKE_CASE : str = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = scope
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_SCREAMING_SNAKE_CASE : str = input_mask.numpy()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = input_mask.shape
_SCREAMING_SNAKE_CASE : List[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = 1
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def create_and_check_model(self, config, input_ids, input_mask):
    """Run TFBlipTextModel with and without an attention mask and check output shapes."""
    model = TFBlipTextModel(config=config)
    result = model(input_ids, attention_mask=input_mask, training=False)
    result = model(input_ids, training=False)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
    """Adapter for the common TF model tests: return (config, inputs_dict)."""
    config_and_inputs = self.prepare_config_and_inputs()
    config, input_ids, input_mask = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for TFBlipTextModel.

    NOTE(review): base class and attribute names restored from the upstream
    transformers test suite — the obfuscated original bound every method to the
    same name, so only the last definition survived and most tests never ran.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # BLIP checkpoints legitimately miss some keys across frameworks.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 295 |
"""simple docstring"""
def _print_dist(dist, v):
    """Pretty-print the v x v shortest-distance matrix; unreachable pairs print as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd-Warshall.

    :param graph: v x v adjacency matrix; ``float("inf")`` marks a missing edge.
    :param v: number of vertices.
    :return: ``(dist, v)`` where ``dist`` is the shortest-distance matrix.
              ``graph`` itself is not modified. The matrix is also printed.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
lowercase_ : Tuple = int(input('''Enter number of vertices: '''))
lowercase_ : List[Any] = int(input('''Enter number of edges: '''))
lowercase_ : Optional[Any] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
lowercase_ : Tuple = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowercase_ : str = int(input('''Enter source:'''))
lowercase_ : Optional[Any] = int(input('''Enter destination:'''))
lowercase_ : Union[str, Any] = float(input('''Enter weight:'''))
lowercase_ : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 295 | 1 |
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random rounds.

    Assumes ``num`` is an odd integer greater than 3 (callers pre-filter with
    small-prime trial division). A composite passes all 5 rounds with
    probability at most 4**-5.
    """
    # write num - 1 as s * 2**t with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)  # fast modular exponentiation
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                i = i + 1
                v = (v ** 2) % num
    return True


def _primes_below(limit: int) -> list:
    """Sieve of Eratosthenes: all primes strictly below ``limit``."""
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if sieve[p]:
            for multiple in range(p * p, limit, p):
                sieve[multiple] = False
    return [i for i, is_p in enumerate(sieve) if is_p]


# Primes below 1000, used for cheap trial division before Miller-Rabin.
# (Replaces the original hand-maintained 168-element literal list.)
LOW_PRIMES = _primes_below(1000)
_LOW_PRIME_SET = frozenset(LOW_PRIMES)  # O(1) membership test


def is_prime_low_num(num: int) -> bool:
    """Return True if ``num`` is (very probably) prime.

    Small primes are answered exactly; anything surviving trial division by
    all primes below 1000 is handed to the probabilistic Miller-Rabin test.
    """
    if num < 2:
        return False
    if num in _LOW_PRIME_SET:
        return True
    for prime in LOW_PRIMES:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random probable prime with exactly ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 70 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """Builds tiny BlenderbotSmall configs and inputs for the TF model tests.

    NOTE(review): parameter and local names restored from the upstream
    transformers test suite — the obfuscated original declared every parameter
    with the same name, which is a SyntaxError.
    """

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with every sequence forced to end in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached (past_key_values) decoding matches full-sequence decoding."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a BlenderbotSmall forward pass.

    Missing attention masks are derived from the pad token; missing head masks
    default to all-ones (no head pruned).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # the first decoder position (decoder_start_token) is always attended
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline model tests for the TF BlenderbotSmall architecture.

    NOTE(review): the mixin base classes were undefined names in the obfuscated
    original; restored to the mixins imported at the top of this module.
    """

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    """Slow end-to-end generation check against the released 90M checkpoint."""

    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        # any of several plausible beam-search outputs is accepted
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 663 | 0 |
import re
from filelock import FileLock
# Detect whether nltk is installed; sentence splitting below needs it.
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # FileLock guards against concurrent workers downloading punkt at once.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Put each sentence of ``x`` on its own line (needed for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char (original discarded this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 60 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image to a [-1, 1] float32 torch tensor of shape (1, C, H, W).

    Width and height are rounded down to the nearest multiple of 32 first,
    as required by the U-Net.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + U-Net + scheduler).

    NOTE(review): names restored from the upstream diffusers pipeline — the
    obfuscated original declared duplicate parameter names (a SyntaxError) and
    an undefined base class.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Upscale ``image`` by running the diffusion process conditioned on it.

        Returns an ImagePipelineOutput (or a 1-tuple when ``return_dict`` is False).
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 60 | 1 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
# Maps a model-type key to (config class, TF model class(es), PyTorch model
# class(es), pretrained archive map(s)) for the PT -> TF 2.0 conversion below.
# NOTE: bound as `MODEL_CLASSES` because that is the name the conversion
# functions look up.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TF 2.0 ``.h5`` checkpoint.

    :param model_type: key into MODEL_CLASSES.
    :param pytorch_checkpoint_path: local path or hub shortcut of the PT weights.
    :param config_file: local path or hub shortcut of the model config.
    :param tf_dump_path: where to write the converted TF weights.
    :param compare_with_pt_model: also run both models and assert the outputs agree.
    :param use_cached_models: reuse already-downloaded files when True.

    NOTE(review): names restored from the upstream conversion script — the
    obfuscated original declared duplicate parameter names (a SyntaxError) and
    collapsed the 4-way MODEL_CLASSES unpack into a single name.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )

    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        # weights come from state_dict, so no hub name is needed here
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one or all PyTorch checkpoints registered in MODEL_CLASSES to TF 2.0.

    Args:
        args_model_type: single model type to convert, or None for every type.
        tf_dump_path: directory receiving the "<shortcut>-tf_model.h5" dumps.
        model_shortcut_names_or_path: checkpoints to convert (None = all known).
        config_shortcut_names_or_path: matching configs (None = same as models).
        compare_with_pt_model: forwarded to convert_pt_checkpoint_to_tf.
        use_cached_models: if False, force re-download of cached shortcut files.
        remove_cached_files: delete the downloaded PT files after conversion.
        only_convert_finetuned_models: restrict to -squad/-mrpc/-mnli checkpoints.

    Raises:
        ValueError: if a requested model type is not in MODEL_CLASSES.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            # Finetuned checkpoints are recognized by their task suffix.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                # NOTE(review): CONFIG_NAME assumed imported at module top — confirm.
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                # NOTE(review): WEIGHTS_NAME assumed imported at module top — confirm.
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # Fix: the parser/args objects were bound to a mangled name while the code
    # below reads `parser` and `args` — bind them under the names actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 88 |
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    """Partition a[left_index:right_index] in place around pivot a[left_index].

    After the call, elements smaller than the pivot precede it and the rest
    follow it within the slice.

    Args:
        a: the list being sorted (mutated in place).
        left_index: first index of the slice; holds the pivot on entry.
        right_index: one past the last index of the slice.

    Returns:
        The final index of the pivot.
    """
    pivot = a[left_index]
    i = left_index + 1  # boundary: a[left_index+1:i] holds elements < pivot
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Move the pivot between the two partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a, left, right):
    """Sort a[left:right] in place using quicksort with a random pivot.

    Args:
        a: the list to sort (mutated in place).
        left: first index of the slice to sort.
        right: one past the last index of the slice to sort.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
def main():
    """Read comma-separated integers from stdin, sort them, and print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
| 88 | 1 |
import heapq
import sys
import numpy as np
# Grid position expressed as (x, y). Requires Python 3.9+ builtin generics.
TPos = tuple[int, int]
class PriorityQueue:
    """Min-priority queue over hashable items, backed by heapq.

    `self.elements` is a heap of (priority, item) pairs; `self.set` mirrors the
    stored items for O(1) membership tests. Priority updates and removals are
    O(n log n): the heap is popped until the item is found and rebuilt.
    """

    def __init__(self):
        self.elements = []  # heap of (priority, item) pairs
        self.set = set()  # items currently queued, for fast membership tests

    def minkey(self):
        """Return the smallest priority in the queue, or inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        """Return True when the queue holds no elements."""
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert item with the given priority, or re-prioritize it if present."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until the item is found, drop its old pair,
            # then push everything back together with the new priority.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove item from the queue if present; no-op otherwise."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Return (without removing) the item with the smallest priority."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p, goal):
    """Euclidean distance from p to goal — the consistent (anchor) heuristic."""
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_2(p, goal):
    """Euclidean distance floor-divided by the global time counter t (inadmissible)."""
    return consistent_heuristic(p, goal) // t
def heuristic_1(p, goal):
    """Manhattan distance from p to goal."""
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start, i, goal, g_function):
    """Priority of node `start` in open list i: g-cost plus weighted i-th heuristic."""
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Render the n x n grid with obstacles and the found path, print the path, exit.

    Args:
        back_pointer: dict mapping each position to its predecessor on the path.
        goal: goal position (x, y).
        start: start position (x, y).

    Side effects: prints the grid and path to stdout and terminates the process
    via sys.exit().
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            # Grid row i corresponds to y == (n - 1) - i (origin at bottom-left).
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    """Return True if position p lies inside the module-level n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand node s: relax its four neighbours and push improved ones onto the open lists.

    Args:
        s: position being expanded.
        j: index of the inadmissible open list that yielded s (0 for the anchor).
        visited: set of discovered positions.
        g_function: dict of g-costs, updated in place.
        close_list_anchor: positions already closed by the anchor search.
        close_list_inad: positions already closed by an inadmissible search.
        open_list: list of PriorityQueue, one per heuristic.
        back_pointer: predecessor map, updated in place.
    """
    # The node is settled: drop it from every open list.
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        # Only push to an inadmissible queue while its key stays
                        # within Wa times the anchor key.
                        if key(neighbours, var, goal, g_function) <= Wa * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )
def make_common_ground():
    """Build the list of blocked grid cells used by the demo (may contain duplicates)."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# Index 0 must be the consistent (anchor) heuristic; the others may be inadmissible.
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

# A simple vertical wall of blocked cells at y == 1.
blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

# Obstacle set actually used by the search; swap in blocks_all for the harder map.
blocks = blocks_blk

# hyper parameters
Wa = 1  # inflation weight used by key() and by the anchor-bound tests
n = 20  # the grid is n x n
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1  # global time counter: bumped by multi_a_star, read by heuristic_2
def multi_a_star(start, goal, n_heuristic):
    """Run multi-heuristic A* from start to goal using n_heuristic open lists.

    Open list 0 is driven by the consistent anchor heuristic; lists 1..n-1 by
    the inadmissible heuristics. On success do_something() prints the path and
    exits the process; otherwise the grid is printed with a failure message.
    """
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # Fix: top_show() returns the (x, y) position itself, so it
                    # must not be unpacked (matches the anchor branch below).
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    # Render the grid top-down; row i corresponds to y coordinate i.
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
# Script entry point: run the multi-heuristic A* demo on the module-level grid.
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 716 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency dict {vertex: [neighbours]}.

    Args:
        vertices_number: number of vertices, labelled 0..vertices_number-1.
        probability: chance in [0, 1] that any given pair of vertices is joined.
        directed: if False, every generated edge (i, j) is mirrored as (j, i).

    Returns:
        dict mapping each vertex to the list of its adjacent vertices.
    """
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Return the complete graph on vertices_number vertices as an adjacency dict."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
# When run directly, execute any doctests defined in this module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 673 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.