"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
A: Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
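
# Illustrative usage sketch (not part of the original file): constructing the
# deprecated class emits a FutureWarning and then behaves exactly like
# YolosImageProcessor. The checkpoint name below is an example.
#
#   from transformers import YolosFeatureExtractor
#
#   feature_extractor = YolosFeatureExtractor.from_pretrained("hustvl/yolos-small")  # warns
#   inputs = feature_extractor(images=image, return_tensors="pt")  # same API as the image processor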
"""simple docstring"""
from __future__ import annotations
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = data
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
def _snake_case ( UpperCamelCase : Node | None ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def _snake_case ( UpperCamelCase : Node | None ):
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def _snake_case ( UpperCamelCase : Node ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def _snake_case ( ): # Main function for testing.
UpperCAmelCase : int = Node(1 )
UpperCAmelCase : Tuple = Node(2 )
UpperCAmelCase : Any = Node(3 )
UpperCAmelCase : Optional[int] = Node(4 )
UpperCAmelCase : Any = Node(5 )
UpperCAmelCase : Optional[int] = Node(6 )
UpperCAmelCase : int = Node(7 )
UpperCAmelCase : str = Node(8 )
UpperCAmelCase : str = Node(9 )
print(is_full_binary_tree(UpperCamelCase ) )
print(depth_of_tree(UpperCamelCase ) )
print("""Tree is: """ )
display(UpperCamelCase )
if __name__ == "__main__":
main()
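
# With the tree built in main() above, the expected output is (hand-checked):
#   False           <- node 5 has a left child but no right child, so the tree is not full
#   5               <- longest root-to-leaf path is 1 -> 3 -> 7 -> 8 -> 9
#   Tree is:
#   4 2 6 5 1 8 9 7 3   <- in-order traversal, printed one value per line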
"""Mask2Former model configuration"""
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config (or a derived class) from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
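
# A minimal usage sketch (assumes a transformers version that ships Mask2Former):
#
#   from transformers import Mask2FormerConfig, Mask2FormerModel
#
#   config = Mask2FormerConfig()      # builds the default Swin backbone config
#   model = Mask2FormerModel(config)  # randomly initialized weights
#   print(config.hidden_dim, config.num_queries)  # -> 256 100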
"""Generate all permutations of a sequence via backtracking."""
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build a state space tree and iterate through each branch using DFS.

    A leaf node is reached once all elements of the sequence have been used.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
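
# The two driver calls above print every permutation, one list per line, in DFS order:
#   [3, 1, 2, 4], [3, 1, 4, 2], [3, 2, 1, 4], ... (4! = 24 lines), then
#   ['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'], ... (3! = 6 lines).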
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node setups.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed for numpy, torch, and CUDA.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
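
# Illustrative single-GPU usage (the real callers are the distillation training
# scripts, which pass their argparse namespace; this Namespace is a stand-in):
#
#   from argparse import Namespace
#
#   params = Namespace(n_gpu=1, local_rank=-1, seed=42)
#   init_gpu_params(params)  # fills in n_nodes, node_id, world_size, is_master, ...
#   set_seed(params)         # seeds numpy, torch, and (with a GPU) CUDA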
def reverse_long_words(sentence: str) -> str:
    """Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
"""Trains a masked language model on TPU, reading pre-tokenized .tfrecord shards."""
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local")
    parser.add_argument("--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
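
# Example invocation (illustrative; the script filename and dataset paths are
# assumptions, and --train_dataset/--eval_dataset should point at directories
# of .tfrecord shards produced by prepare_tfrecord_shards.py):
#
#   python run_mlm.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16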
"""Utilities for the LUKE token-classification example: tensor padding and punctuation detection."""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
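
# Quick sketch of padding_tensor (illustrative): right-pad two lists of entity
# spans with the sentinel (-1, -1) up to sequence_length=3.
#
#   padding_tensor([[(0, 2)], [(0, 1), (2, 3)]], (-1, -1), "right", 3)
#   # -> [[[0, 2], [-1, -1], [-1, -1]], [[0, 1], [2, 3], [-1, -1]]]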
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
"""Tokenization classes for SqueezeBERT."""

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
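
# Sketch of the special-token layout produced by the two methods above
# (illustrative; assumes each word maps to a single wordpiece token):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   encoding = tokenizer("hello", "world")
#   # input_ids      ~ [CLS] hello [SEP] world [SEP]
#   # token_type_ids ~ [0, 0, 0, 1, 1]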
"""A recursive linear search that scans from both ends toward the middle."""


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Return the index of `key` in `list_data` if found, -1 otherwise.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
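
# The function checks one element from each end per call, so it makes at most
# about len(list_data) / 2 recursive calls. Illustrative calls:
#
#   search([1, 3, 5, 7, 9], 9)   # -> 4, found at the right end on the first call
#   search([1, 3, 5, 7, 9], 4)   # -> -1, not present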
"""Fast tokenizer class for BARThez, backed by SentencePiece."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""PyTorch PoolFormer model."""

import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
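
# A small sketch of stochastic depth in action (illustrative): in training mode
# each sample's tensor is zeroed with probability drop_prob, and survivors are
# rescaled by 1 / keep_prob so the expectation is unchanged; in eval mode the
# function is the identity.
#
#   x = torch.randn(4, 16, 8, 8)
#   y_train = drop_path(x, drop_prob=0.5, training=True)   # ~half the samples zeroed, rest scaled by 2
#   y_eval = drop_path(x, drop_prob=0.5, training=False)   # returns x unchanged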
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case = None ):
super().__init__()
snake_case = drop_prob
def a_ ( self , __snake_case ):
return drop_path(__snake_case , self.drop_prob , self.training )
def a_ ( self ):
return "p={}".format(self.drop_prob )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None ):
super().__init__()
snake_case = patch_size if isinstance(__snake_case , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case = stride if isinstance(__snake_case , collections.abc.Iterable ) else (stride, stride)
snake_case = padding if isinstance(__snake_case , collections.abc.Iterable ) else (padding, padding)
snake_case = nn.Convad(__snake_case , __snake_case , kernel_size=__snake_case , stride=__snake_case , padding=__snake_case )
snake_case = norm_layer(__snake_case ) if norm_layer else nn.Identity()
def a_ ( self , __snake_case ):
snake_case = self.projection(__snake_case )
snake_case = self.norm(__snake_case )
return embeddings
class A__ ( nn.GroupNorm ):
"""simple docstring"""
def __init__( self , __snake_case , **__snake_case ):
super().__init__(1 , __snake_case , **__snake_case )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = nn.AvgPoolad(__snake_case , stride=1 , padding=pool_size // 2 , count_include_pad=__snake_case )
def a_ ( self , __snake_case ):
return self.pool(__snake_case ) - hidden_states
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case ):
super().__init__()
snake_case = nn.Convad(__snake_case , __snake_case , 1 )
snake_case = nn.Convad(__snake_case , __snake_case , 1 )
snake_case = PoolFormerDropPath(__snake_case )
if isinstance(config.hidden_act , __snake_case ):
snake_case = ACTaFN[config.hidden_act]
else:
snake_case = config.hidden_act
def a_ ( self , __snake_case ):
snake_case = self.conva(__snake_case )
snake_case = self.act_fn(__snake_case )
snake_case = self.drop(__snake_case )
snake_case = self.conva(__snake_case )
snake_case = self.drop(__snake_case )
return hidden_states
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
super().__init__()
snake_case = PoolFormerPooling(__snake_case )
snake_case = PoolFormerOutput(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case = PoolFormerGroupNorm(__snake_case )
snake_case = PoolFormerGroupNorm(__snake_case )
# Useful for training neural nets
snake_case = PoolFormerDropPath(__snake_case ) if drop_path > 0.0 else nn.Identity()
snake_case = config.use_layer_scale
if config.use_layer_scale:
snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((__snake_case) ) , requires_grad=__snake_case )
snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((__snake_case) ) , requires_grad=__snake_case )
def a_ ( self , __snake_case ):
if self.use_layer_scale:
snake_case = self.pooling(self.before_norm(__snake_case ) )
snake_case = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case = hidden_states + self.drop_path(__snake_case )
snake_case = ()
snake_case = self.output(self.after_norm(__snake_case ) )
snake_case = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case = hidden_states + self.drop_path(__snake_case )
snake_case = (output,) + outputs
return outputs
else:
snake_case = self.drop_path(self.pooling(self.before_norm(__snake_case ) ) )
# First residual connection
snake_case = pooling_output + hidden_states
snake_case = ()
# Second residual connection inside the PoolFormerOutput block
snake_case = self.drop_path(self.output(self.after_norm(__snake_case ) ) )
snake_case = hidden_states + layer_output
snake_case = (output,) + outputs
return outputs
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = config
# stochastic depth decay rule
snake_case = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case = nn.ModuleList(__snake_case )
# Transformer blocks
snake_case = []
snake_case = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__snake_case , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__snake_case ) )
snake_case = nn.ModuleList(__snake_case )
def a_ ( self , __snake_case , __snake_case=False , __snake_case=True ):
snake_case = () if output_hidden_states else None
snake_case = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case = layers
# Get patch embeddings from hidden_states
snake_case = embedding_layer(__snake_case )
# Send the embeddings through the blocks
for _, blk in enumerate(__snake_case ):
snake_case = blk(__snake_case )
snake_case = layer_outputs[0]
if output_hidden_states:
snake_case = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = PoolFormerConfig
__magic_name__ = 'poolformer'
__magic_name__ = 'pixel_values'
__magic_name__ = True
    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
_SCREAMING_SNAKE_CASE : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_SCREAMING_SNAKE_CASE : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , snake_case__ , )
class A__ ( snake_case__ ):
"""simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        encoder_outputs = self.encoder(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states
        )
class A__ ( nn.Module ):
"""simple docstring"""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , snake_case__ , )
class A__ ( snake_case__ ):
"""simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
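# Hedged usage sketch (added; not part of the original file — the class names above
# are this file's scrambled aliases, so `A__` below refers to the image-classification
# head defined last):
#
#   config = PoolFormerConfig(num_labels=10)
#   model = A__(config)
#   out = model(pixel_values=torch.randn(1, 3, 224, 224))
#   out.logits.shape  # -> torch.Size([1, 10])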
| 127
| 0
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Apply the logistic sigmoid 1 / (1 + e^(-x)) element-wise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
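    # Quick numeric check (added): the sigmoid of 0 is exactly 0.5.
    assert sigmoid(np.array([0.0]))[0] == 0.5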
| 354
|
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
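if __name__ == "__main__":
    # Hedged demo (added): for [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6.
    assert max_product_subarray([2, 3, -2, 4]) == 6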
| 27
| 0
|
def mf_knapsack(i: int, wt: list, val: list, j: int) -> int:
    """Memory-function (memoized) knapsack over the global dp table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int) -> tuple:
    """Bottom-up 0/1 knapsack; returns (optimal value, full dp table)."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
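# Recurrence used above (added comment): dp[i][w_] is the best value achievable with
# the first i items within capacity w_:
#   dp[i][w_] = dp[i-1][w_]                                         if wt[i-1] > w_
#   dp[i][w_] = max(dp[i-1][w_], val[i-1] + dp[i-1][w_ - wt[i-1]])  otherwise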
def knapsack_with_example_solution(w: int, wt: list, val: list) -> tuple:
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set) -> None:
    """Walk the dp table backwards, collecting 1-based indices of the chosen items."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = [3, 2, 4, 4]
SCREAMING_SNAKE_CASE :Optional[int] = [4, 3, 2, 3]
SCREAMING_SNAKE_CASE :Union[str, Any] = 4
SCREAMING_SNAKE_CASE :str = 6
SCREAMING_SNAKE_CASE :Optional[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :int = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 15
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
__lowerCamelCase : str = BlenderbotConfig
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : Optional[int] = """gelu"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =parent
UpperCAmelCase : Optional[int] =batch_size
UpperCAmelCase : Dict =seq_length
UpperCAmelCase : Optional[Any] =is_training
UpperCAmelCase : List[str] =use_labels
UpperCAmelCase : List[Any] =vocab_size
UpperCAmelCase : Optional[int] =hidden_size
UpperCAmelCase : Tuple =num_hidden_layers
UpperCAmelCase : Any =num_attention_heads
UpperCAmelCase : Optional[int] =intermediate_size
UpperCAmelCase : str =hidden_dropout_prob
UpperCAmelCase : Optional[int] =attention_probs_dropout_prob
UpperCAmelCase : str =max_position_embeddings
UpperCAmelCase : List[Any] =eos_token_id
UpperCAmelCase : Optional[int] =pad_token_id
UpperCAmelCase : Tuple =bos_token_id
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder()
UpperCAmelCase : Any =inputs_dict['''input_ids''']
UpperCAmelCase : str =input_ids[:1, :]
UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase : Tuple =inputs_dict['''head_mask''']
UpperCAmelCase : List[Any] =1
# first forward pass
UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
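    # KV-cache check (added comment): decoding the 3 appended tokens with
    # past_key_values must match the same positions from a full forward pass
    # on the concatenated sequence, up to rtol=1e-3.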
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , )-> str:
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase : int =tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase : Tuple =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Dict = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] =TFBlenderbotModelTester(self )
UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase ):
__lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""]
__lowerCamelCase : Dict = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase : Optional[int] =self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 348
| 0
|
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
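# Sanity check on a tiny prefix (added comment): 1**1 + 2**2 + 3**3 = 1 + 4 + 27 = 32.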
if __name__ == "__main__":
print(solution())
| 350
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of `sequence` via a state-space-tree traversal."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """At each index, recurse first excluding then including the current element."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
UpperCamelCase_ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
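# Expected order for ['A', 'B', 'C'] (added comment) — the exclude-first DFS prints:
# [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C']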
| 142
| 0
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _snake_case ( a__ ):
snake_case__ = (EulerDiscreteScheduler,)
snake_case__ = 10
def lowerCamelCase__ ( self : List[Any] , **UpperCAmelCase : Optional[Any] ):
__lowerCamelCase : int = {
"num_train_timesteps": 1100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**UpperCAmelCase )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
__lowerCamelCase : List[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
__lowerCamelCase : Dict = self.dummy_model()
__lowerCamelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase : Union[str, Any] = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
__lowerCamelCase : Tuple = output.prev_sample
__lowerCamelCase : Optional[Any] = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCamelCase : Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3
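    # Denoising-loop pattern exercised above (added comment): scale the model input
    # for timestep t, predict with the model, then scheduler.step(...) yields the
    # previous (less noisy) sample.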
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
__lowerCamelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
__lowerCamelCase : Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : Dict = self.dummy_model()
__lowerCamelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase : Dict = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase : Tuple = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Tuple = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
__lowerCamelCase : Tuple = output.prev_sample
__lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCamelCase : Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : int = self.scheduler_classes[0]
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
__lowerCamelCase : Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = self.dummy_model()
__lowerCamelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowerCamelCase : List[Any] = sample.to(UpperCAmelCase )
for t in scheduler.timesteps:
__lowerCamelCase : List[Any] = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : List[str] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = output.prev_sample
__lowerCamelCase : Tuple = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCamelCase : Any = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
__lowerCamelCase : Optional[int] = self.get_scheduler_config()
__lowerCamelCase : Dict = scheduler_class(**UpperCAmelCase , use_karras_sigmas=UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : str = self.dummy_model()
__lowerCamelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowerCamelCase : List[Any] = sample.to(UpperCAmelCase )
for t in scheduler.timesteps:
__lowerCamelCase : str = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : int = model(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Dict = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
__lowerCamelCase : str = output.prev_sample
__lowerCamelCase : Optional[int] = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCamelCase : Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1E-3
| 135
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__A = logging.get_logger(__name__)
enable_full_determinism()
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = UNetaDModel
snake_case__ = "sample"
@property
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : int = 4
__lowerCamelCase : List[Any] = 3
__lowerCamelCase : List[Any] = (32, 32)
__lowerCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : List[Any] = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : int ):
return (3, 32, 32)
@property
def lowerCamelCase__ ( self : Tuple ):
return (3, 32, 32)
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Tuple = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
__lowerCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = UNetaDModel
snake_case__ = "sample"
@property
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Optional[Any] = 4
__lowerCamelCase : int = 4
__lowerCamelCase : Optional[int] = (32, 32)
__lowerCamelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return (4, 32, 32)
@property
def lowerCamelCase__ ( self : Any ):
return (4, 32, 32)
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : Dict = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
__lowerCamelCase : Dict = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
__lowerCamelCase : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase , __lowerCamelCase : List[Any] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase )
model.to(UpperCAmelCase )
__lowerCamelCase : int = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase__ ( self : Tuple ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
__lowerCamelCase , __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase )
model_accelerate.to(UpperCAmelCase )
model_accelerate.eval()
__lowerCamelCase : List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
__lowerCamelCase : int = noise.to(UpperCAmelCase )
__lowerCamelCase : Optional[int] = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
__lowerCamelCase : int = model_accelerate(UpperCAmelCase , UpperCAmelCase )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
__lowerCamelCase , __lowerCamelCase : str = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase , low_cpu_mem_usage=UpperCAmelCase )
model_normal_load.to(UpperCAmelCase )
model_normal_load.eval()
__lowerCamelCase : Any = model_normal_load(UpperCAmelCase , UpperCAmelCase )["sample"]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-3 )
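    # The comparison above (added comment): loading with accelerate's
    # low_cpu_mem_usage path must match the standard loader's outputs within rtol=1e-3.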
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(UpperCAmelCase )
__lowerCamelCase : str = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__lowerCamelCase : Optional[int] = noise.to(UpperCAmelCase )
__lowerCamelCase : Tuple = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(UpperCAmelCase , UpperCAmelCase ).sample
__lowerCamelCase : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__lowerCamelCase : Optional[Any] = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-3 ) )
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = UNetaDModel
snake_case__ = "sample"
@property
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[Any]=(32, 32) ):
__lowerCamelCase : Tuple = 4
__lowerCamelCase : int = 3
__lowerCamelCase : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return (3, 32, 32)
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : Union[str, Any] = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
__lowerCamelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase : List[str] = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
__lowerCamelCase : int = self.dummy_input
__lowerCamelCase : int = floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase )
__lowerCamelCase : int = noise
__lowerCamelCase : int = model(**UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : int = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = 4
__lowerCamelCase : List[Any] = 3
__lowerCamelCase : Tuple = (256, 256)
__lowerCamelCase : int = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : List[Any] = torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Tuple = model(UpperCAmelCase , UpperCAmelCase ).sample
__lowerCamelCase : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__lowerCamelCase : List[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-2 ) )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : str = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = 4
__lowerCamelCase : Any = 3
__lowerCamelCase : Union[str, Any] = (32, 32)
__lowerCamelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
__lowerCamelCase : Dict = torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : List[str] = model(UpperCAmelCase , UpperCAmelCase ).sample
__lowerCamelCase : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__lowerCamelCase : int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-2 ) )
def lowerCamelCase__ ( self : int ):
# not required for this model
pass
| 135
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__UpperCamelCase : List[str] = {
"RUCAIBox/mvp": 1024,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = MvpTokenizer
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : str="replace" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Optional[Any]="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Optional[int]="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int=True , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_a )
SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[str] = """post_processor"""
SCREAMING_SNAKE_CASE : str = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : str = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : str = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : str = False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
SCREAMING_SNAKE_CASE : List[str] = add_prefix_space
SCREAMING_SNAKE_CASE : List[Any] = True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
SCREAMING_SNAKE_CASE : Any = trim_offsets
SCREAMING_SNAKE_CASE : Tuple = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Dict = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
SCREAMING_SNAKE_CASE : Any = value
def __A ( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.get('''is_split_into_words''' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_a , **_a )
def __A ( self : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = kwargs.get('''is_split_into_words''' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_a , **_a )
def __A ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __A ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __A ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
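    # Special-token layout implemented above (added comment): a single sequence is
    # <s> A </s>; a pair is <s> A </s></s> B </s>, and token_type_ids are all zeros,
    # following the RoBERTa/BART convention this tokenizer inherits.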
| 362
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Dict = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = UniSpeechSatForSequenceClassification.from_pretrained(_lowercase , config=_lowercase )
SCREAMING_SNAKE_CASE : Any = downstream_dict['''projector.weight''']
SCREAMING_SNAKE_CASE : Optional[int] = downstream_dict['''projector.bias''']
SCREAMING_SNAKE_CASE : Optional[Any] = downstream_dict['''model.post_net.linear.weight''']
SCREAMING_SNAKE_CASE : int = downstream_dict['''model.post_net.linear.bias''']
return model
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowercase , config=_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict['''model.linear.weight''']
SCREAMING_SNAKE_CASE : str = downstream_dict['''model.linear.bias''']
return model
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = UniSpeechSatForXVector.from_pretrained(_lowercase , config=_lowercase )
SCREAMING_SNAKE_CASE : str = downstream_dict['''connector.weight''']
SCREAMING_SNAKE_CASE : Dict = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE : Optional[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
SCREAMING_SNAKE_CASE : List[str] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
SCREAMING_SNAKE_CASE : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
SCREAMING_SNAKE_CASE : Any = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
SCREAMING_SNAKE_CASE : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
SCREAMING_SNAKE_CASE : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
SCREAMING_SNAKE_CASE : Any = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = torch.load(_lowercase , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Any = checkpoint['''Downstream''']
SCREAMING_SNAKE_CASE : List[Any] = UniSpeechSatConfig.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor.from_pretrained(
_lowercase , return_attention_mask=_lowercase , do_normalize=_lowercase )
SCREAMING_SNAKE_CASE : Tuple = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
SCREAMING_SNAKE_CASE : str = convert_classification(_lowercase , _lowercase , _lowercase )
elif arch.endswith('''ForAudioFrameClassification''' ):
SCREAMING_SNAKE_CASE : List[Any] = convert_diarization(_lowercase , _lowercase , _lowercase )
elif arch.endswith('''ForXVector''' ):
SCREAMING_SNAKE_CASE : int = convert_xvector(_lowercase , _lowercase , _lowercase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE : int = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
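    # Conversion flow (added comment): the HF head class is picked from the config's
    # architecture suffix, the s3prl "Downstream" tensors are copied onto it
    # key-by-key above, and both the feature extractor and model are saved.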
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 258
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
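    # BERT-style segment ids built above (added comment): tokens of sequence A
    # (including [CLS] and its [SEP]) map to 0, tokens of sequence B map to 1.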
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 349
|
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _lowercase ( __A ,__A=False ):
'''simple docstring'''
try:
__UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__UpperCamelCase = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False)
a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True)
a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a__ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a__ : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a__ : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a__ : Tuple = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def _lowercase ( __A ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires faiss""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires regex""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TF_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires JAX""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def _lowercase ( __A ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__UpperCamelCase = unittest.skip("""test is slow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__UpperCamelCase = unittest.skip("""test is local""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__UpperCamelCase = unittest.skip("""test is packaged""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__UpperCamelCase = unittest.skip("""test requires remote""" )(__A )
return test_case
def _lowercase ( *__A ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("""test""" ):
for decorator in decorators:
__UpperCamelCase = decorator(__A )
setattr(cls ,__A ,__A )
return cls
return decorate
class UpperCAmelCase__ ( UpperCAmelCase_):
pass
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
@contextmanager
def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ):
'''simple docstring'''
__UpperCamelCase = requests.Session().request
def timeout_request(__A ,__A ,__A ,**__A ):
# Change the url to an invalid url so that the connection hangs
__UpperCamelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
__UpperCamelCase = timeout
try:
return online_request(__A ,__A ,**__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__UpperCamelCase = url
__UpperCamelCase = e.args[0]
__UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),)
__UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(__A ,__A ,**__A ):
raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" ,__A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" ,__A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _lowercase ( *__A ,**__A ):
'''simple docstring'''
__UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowercase ( __A ,__A ):
'''simple docstring'''
return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist()
def _lowercase ( __A ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(__A ,*__A ,**__A ):
try:
return func(*__A ,**__A )
except HTTPError as err:
if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper ,__A )
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase , lowercase ) -> str:
__UpperCamelCase = returncode
__UpperCamelCase = stdout
__UpperCamelCase = stderr
async def _lowercase ( __A ,__A ):
'''simple docstring'''
while True:
__UpperCamelCase = await stream.readline()
if line:
callback(__A )
else:
break
async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ ,""" """.join(__A ) )
__UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__UpperCamelCase = []
__UpperCamelCase = []
def tee(__A ,__A ,__A ,__A="" ):
__UpperCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(__A )
if not quiet:
print(__A ,__A ,file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ),
_read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ),
] ,timeout=__A ,)
return _RunOutput(await p.wait() ,__A ,__A )
def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ):
'''simple docstring'''
__UpperCamelCase = asyncio.get_event_loop()
__UpperCamelCase = loop.run_until_complete(
_stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) )
__UpperCamelCase = """ """.join(__A )
if result.returncode > 0:
__UpperCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output." )
return result
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" )
__UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M )
return int(__A )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = 29_500
__UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta
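
# --- Illustrative usage (a sketch, not part of the original test utilities) ---
# It assumes the enclosing module defines the `offline(...)` context manager whose
# body appears above, and that `datasets` and `pytest` are installed. The dataset
# id is just an example and `train.py` is a hypothetical script path.
def example_offline_and_subprocess_usage():
    import pytest
    from datasets import load_dataset

    # simulate a network failure: any HTTP call raises a ConnectionError
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            load_dataset("squad", download_mode="force_redownload")

    # run a distributed-training smoke test, one unique master port per xdist worker
    cmd = f"python -m torch.distributed.run --master_port={get_torch_dist_unique_port()} --nproc_per_node=2 train.py"
    execute_subprocess_async(cmd.split(), env=os.environ.copy())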
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
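
# Usage sketch (commented out because it downloads weights): the classes above add
# nothing on top of their T5 parents except the mT5 model type and config class, so
# any T5 workflow applies unchanged. "google/mt5-small" is a hosted checkpoint;
# TensorFlow must be installed.
#
#   from transformers import AutoTokenizer, TFMT5ForConditionalGeneration
#
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
#   inputs = tokenizer("summarize: studies have shown...", return_tensors="tf")
#   summary_ids = model.generate(inputs["input_ids"], max_new_tokens=20)
#   print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))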
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
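
# Example invocation (a sketch; the script filename and the checkpoint path are
# placeholders you must adapt to your setup):
#
#   python convert_longformer_qa_checkpoint_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path /path/to/lightning_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa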
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
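
# How the lazy import above behaves, as a self-contained sketch (this illustrates
# the idea only; it is not the actual `_LazyModule` implementation): the module
# object replaces itself in `sys.modules`, and a submodule is imported only when
# one of its attributes is first accessed.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Imports a submodule only when one of its attributes is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)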
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Dict ) -> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Union[str, Any] ) -> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
a_ : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
try:
if dataset.shape[1] != value_array.shape[1]:
a_ : Optional[int] = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
a_ : Any = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
a_ : int = []
for value in value_array:
a_ : int = euclidean(SCREAMING_SNAKE_CASE_ , dataset[0] )
a_ : Union[str, Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
a_ : Optional[int] = euclidean(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if dist > temp_dist:
a_ : Optional[int] = temp_dist
a_ : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : List[str] ) -> float:
"""simple docstring"""
return np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / (norm(SCREAMING_SNAKE_CASE_ ) * norm(SCREAMING_SNAKE_CASE_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
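
# A tiny, hand-checked example of the API above (illustrative; not part of the
# original module):
def _similarity_search_example() -> None:
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.0, 1.0]])
    # [0.0, 0.0] and [1.0, 1.0] are both at distance 1.0; the first one found wins
    assert similarity_search(dataset, value_array) == [[[0.0, 0.0], 1.0]]
    # cosine similarity of parallel vectors is exactly 1.0
    assert math.isclose(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])), 1.0)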
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: "InstructBlipVisionConfig",
        qformer_config: "InstructBlipQFormerConfig",
        text_config: "PretrainedConfig",
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
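
# Usage sketch for the composite config (names are the classes defined above;
# "opt" is the default text backbone registered in CONFIG_MAPPING):
def _build_default_instructblip_config() -> "InstructBlipConfig":
    vision_config = InstructBlipVisionConfig()
    qformer_config = InstructBlipQFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()
    return InstructBlipConfig.from_vision_qformer_text_configs(
        vision_config, qformer_config, text_config
    )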
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
@property
def lowerCAmelCase_ ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = ort.SessionOptions()
_UpperCAmelCase = False
return options
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_UpperCAmelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = '''A red cat sitting on a park bench'''
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__lowerCAmelCase , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
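
# What this dummy-object pattern buys us: the module always imports cleanly,
# but *using* the class raises a helpful error naming the missing backends.
# Illustrative only — the class name and module path follow diffusers'
# conventional layout, and the exact message comes from `requires_backends`:
#
#   from diffusers.utils.dummy_transformers_and_torch_and_note_seq_objects import MidiProcessor
#
#   MidiProcessor()  # raises an error telling you to install
#                    # transformers, torch and note_seq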
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : Any = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class snake_case ( UpperCAmelCase ):
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(A , 'num_encoder_blocks' ) )
class snake_case :
def __init__( self : List[Any] , A : Dict , A : List[Any]=1_3 , A : str=6_4 , A : Union[str, Any]=3 , A : Union[str, Any]=4 , A : Union[str, Any]=[2, 2, 2, 2] , A : List[str]=[8, 4, 2, 1] , A : Optional[Any]=[1_6, 3_2, 6_4, 1_2_8] , A : Optional[Any]=[1, 4, 8, 1_6] , A : Tuple=[1, 2, 4, 8] , A : Optional[Any]=True , A : Any=True , A : Optional[Any]="gelu" , A : Optional[int]=0.1 , A : List[Any]=0.1 , A : List[str]=0.02 , A : List[Any]=3 , A : str=None , ):
'''simple docstring'''
a : Optional[Any] = parent
a : Optional[Any] = batch_size
a : Optional[Any] = image_size
a : Optional[int] = num_channels
a : List[str] = num_encoder_blocks
a : Optional[Any] = sr_ratios
a : Any = depths
a : Any = hidden_sizes
a : Union[str, Any] = downsampling_rates
a : Any = num_attention_heads
a : int = is_training
a : Dict = use_labels
a : str = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : Optional[Any] = initializer_range
a : Dict = num_labels
a : Union[str, Any] = scope
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a : int = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , A : str , A : List[Any] , A : List[Any] ):
'''simple docstring'''
a : Optional[Any] = SegformerModel(config=A )
model.to(A )
model.eval()
a : Union[str, Any] = model(A )
a : Optional[int] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCamelCase__ ( self : Optional[int] , A : Union[str, Any] , A : str , A : Optional[Any] ):
'''simple docstring'''
a : List[Any] = self.num_labels
a : Optional[int] = SegformerForSemanticSegmentation(A )
model.to(A )
model.eval()
a : str = model(A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
a : int = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCamelCase__ ( self : Dict , A : Dict , A : Any , A : Optional[Any] ):
'''simple docstring'''
a : Optional[int] = 1
a : List[Any] = SegformerForSemanticSegmentation(config=A )
model.to(A )
model.eval()
a : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(A )
a : Dict = model(A , labels=A )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : str = self.prepare_config_and_inputs()
a, a, a : str = config_and_inputs
a : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
__magic_name__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : Union[str, Any] = SegformerModelTester(self )
a : Tuple = SegformerConfigTester(self , config_class=A )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*A )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*A )
@unittest.skip('SegFormer does not use inputs_embeds' )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a, a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Dict = model_class(A )
a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : List[str] = [*signature.parameters.keys()]
a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , A )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a, a : Any = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
for model_class in self.all_model_classes:
a : Optional[Any] = True
a : Tuple = False
a : int = True
a : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(A , A ) )
a : Union[str, Any] = outputs.attentions
a : Tuple = sum(self.model_tester.depths )
self.assertEqual(len(A ) , A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : Tuple = True
a : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : str = model(**self._prepare_for_class(A , A ) )
a : Optional[int] = outputs.attentions
self.assertEqual(len(A ) , A )
# verify the first attentions (first block, first layer)
a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
a : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a : Tuple = (self.model_tester.image_size // 3_2) ** 2
a : Tuple = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a : str = len(A )
# Check attention is always last and order is fine
a : str = True
a : Tuple = True
a : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 1 , len(A ) )
a : str = outputs.attentions
self.assertEqual(len(A ) , A )
# verify the first attentions (first block, first layer)
a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
a : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(A : Optional[Any] , A : List[str] , A : Union[str, Any] ):
a : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Optional[Any] = model(**self._prepare_for_class(A , A ) )
a : Tuple = outputs.hidden_states
a : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(A ) , A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a, a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : str = True
check_hidden_states_output(A , A , A )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
a, a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(A ):
continue
a : List[Any] = model_class(A )
model.to(A )
model.train()
a : Tuple = self._prepare_for_class(A , A , return_labels=A )
a : Any = model(**A ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = SegformerModel.from_pretrained(A )
self.assertIsNotNone(A )
def snake_case ():
'''simple docstring'''
a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : int = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
a : Dict = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
A )
a : str = prepare_img()
a : List[str] = image_processor(images=A , return_tensors='pt' )
a : List[str] = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
a : Optional[int] = model(A )
a : Any = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , A )
a : str = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Optional[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
a : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(A )
a : List[Any] = prepare_img()
a : Optional[Any] = image_processor(images=A , return_tensors='pt' )
a : int = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
a : Optional[Any] = model(A )
a : Tuple = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , A )
a : Optional[Any] = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-1 ) )
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : str = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
a : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
A )
a : int = prepare_img()
a : Any = image_processor(images=A , return_tensors='pt' )
a : List[Any] = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
a : str = model(A )
a : str = outputs.logits.detach().cpu()
a : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_0_0, 3_0_0)] )
a : Dict = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , A )
a : int = image_processor.post_process_semantic_segmentation(outputs=A )
a : Any = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , A )
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (adapted from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with one '<symbol> <count>' pair per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : List[Any] = 3
lowerCAmelCase_ : Optional[int] = (3_2, 3_2)
lowerCAmelCase_ : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase )
return image
@property
def lowercase_ ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def lowercase_ ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(__lowercase )
@property
def lowercase_ ( self ) -> List[Any]:
def extract(*__lowercase , **__lowercase ):
class snake_case__:
'''simple docstring'''
def __init__( self ) -> Any:
lowerCAmelCase_ : Optional[int] = torch.ones([0] )
def lowercase_ ( self , __lowercase ) -> str:
self.pixel_values.to(__lowercase )
return self
return Out()
return extract
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
lowerCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=__lowercase )
lowerCAmelCase_ : Union[str, Any] = self.dummy_vae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : Optional[int] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase_ : Optional[int] = 7_7
lowerCAmelCase_ : List[Any] = self.dummy_image.to(__lowercase )
lowerCAmelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : str = AltDiffusionImgaImgPipeline(
unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
lowerCAmelCase_ : List[str] = alt_pipe.to(__lowercase )
alt_pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Optional[int] = '''A painting of a squirrel eating a burger'''
lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : str = alt_pipe(
[prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Dict = alt_pipe(
[prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , return_dict=__lowercase , )[0]
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase_ : Any = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images

        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
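

# Example (illustrative, not part of the test suite): the img2img call pattern the
# tests above exercise. `strength` controls how much of `init_image` survives
# re-noising (0.0 keeps it untouched, 1.0 ignores it). Upstream diffusers exposes
# this pipeline as `AltDiffusionImg2ImgPipeline`; the model id and image URL are
# the ones already used in the tests.
def _example_alt_diffusion_img2img():  # pragma: no cover
    from diffusers import AltDiffusionImg2ImgPipeline
    from diffusers.utils import load_image

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    generator = torch.manual_seed(0)
    return pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,  # higher strength = stronger deviation from init_image
        guidance_scale=7.5,
        generator=generator,
    ).images[0]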
| 262
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer(self) -> PerceiverTokenizer:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
if FRAMEWORK != "jax":
lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.perceiver_tokenizer
lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase_ : int = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ : List[str] = tokenizer(
text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
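

# Illustration (inferred from the expected ids in the tests above, not from the
# tokenizer source): Perceiver tokenizes raw UTF-8 bytes, shifting each byte by
# the number of special tokens (assumed 6 here, so byte b -> id b + 6) and
# wrapping the sequence in [CLS] = 4 ... [SEP] = 5.
def _perceiver_byte_ids(text):
    CLS, SEP, OFFSET = 4, 5, 6  # values read off the fixtures above
    return [CLS] + [b + OFFSET for b in text.encode("utf-8")] + [SEP]


# _perceiver_byte_ids("Unicode €.") reproduces
# [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]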
| 262
| 1
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase__ = f"""{text_of_1_token} {text_of_1_token}"""
UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , )
UpperCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
UpperCAmelCase__ = f""" {text}"""
UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , )
UpperCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
def lowercase_ (self : str ) -> int:
"""simple docstring"""
with self.assertRaises(__UpperCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
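

# A minimal sketch (illustrative only) of how the toy vocab/merges above drive
# byte-pair encoding: starting from characters with "</w>" marking the word end,
# the ranked merges are applied greedily. Real BPE repeatedly merges the
# lowest-ranked adjacent pair; for this tiny merge table a single rank-ordered
# pass gives the same result, so _toy_bpe("lower") == ["lo", "w", "er</w>"],
# matching `bpe_tokens` in test_full_tokenizer.
def _toy_bpe(word, merges=(("l", "o"), ("lo", "w</w>"), ("e", "r</w>"))):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]  # merge the adjacent pair in place
            else:
                i += 1
    return symbols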
| 143
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
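
# The try/except above is the standard soft-dependency guard: when torch or
# transformers is missing, importing the package still succeeds, and the real
# pipelines are replaced by dummy objects that raise a helpful error only when
# instantiated. The same pattern for a hypothetical module would look like:
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from ...utils.dummy_torch_objects import MyPipeline  # dummy placeholder
#     else:
#         from .pipeline_my import MyPipeline  # real implementation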
| 143
| 1
|
"""simple docstring"""
def solution(n=600851475143):
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i  # i is prime here: record it as the largest factor seen so far
        while n % i == 0:
            n = n // i  # divide this prime factor out completely
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
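    # Worked example: for n = 13195 the loop divides out 5, 7 and 13 and ends
    # with ans = 29, the largest prime factor; for the default
    # n = 600851475143 the answer is 6857.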
| 57
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!")

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
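

# A minimal sketch of configuring RoPE scaling for longer contexts; per the
# validation above, `type` must be "linear" or "dynamic" and `factor` a float
# strictly greater than 1.0. The concrete values are illustrative.
def _example_rope_scaled_config():
    return GPTNeoXConfig(
        max_position_embeddings=2048,
        rope_scaling={"type": "linear", "factor": 2.0},  # ~2x usable positions
    )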
| 120
| 0
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
| 356
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term, power) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
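    # Worked example: p_series(5, 2) returns
    # ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"] -- the first five terms of
    # sum(1/n^2); note the first term is rendered as "1" rather than "1 / 1".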
| 129
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Compute whichever of voltage, current or power is given as 0, using the
    fundamental relation P = V * I. Exactly one argument must be 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
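    # Worked examples of solving P = V * I for the missing quantity (exactly
    # one argument must be 0):
    #     electric_power(voltage=0, current=2, power=5)  # result(name='voltage', value=2.5)
    #     electric_power(voltage=2, current=2, power=0)  # result(name='power', value=4.0)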
| 5
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
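

# Hedged usage sketch: `Preprocess` expects a detectron2-style config exposing
# cfg.INPUT.*, cfg.MODEL.* etc.; in the accompanying demo such a config is
# loaded with a `Config.from_pretrained` helper (name assumed here). Given raw
# images it returns a padded batch, per-image sizes, and y/x rescale factors
# that `_scale_box` can apply to the detector's box outputs:
#
#     frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")  # assumed helper
#     preprocess = Preprocess(frcnn_cfg)
#     images, sizes, scales_yx = preprocess("input.jpg", single_image=True)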
| 186
| 0
|
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
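    # How the bitmask works: bit p of `mask` records whether person p already
    # holds a task, so a search state is (mask, task_no) and the DP table has
    # 2^M * (N + 1) entries. Assigning task 1 to person 0, for instance, moves
    # the search from mask 0b000 to mask 0b001 at task_no 2. Overall time
    # complexity is O(2^M * N * M).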
| 370
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n    begging for his blessing. <eod> </s> <eos>\n    "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
                        " model's max length")
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
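

# A minimal usage sketch of the pipeline defined above (model name is
# illustrative; any causal LM works), showing the knobs handled in
# _sanitize_parameters:
def _example_text_generation():  # pragma: no cover
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    # return_full_text=False -> ReturnType.NEW_TEXT: the prompt is stripped
    # from the returned string.
    out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
    return out[0]["generated_text"]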
| 86
| 0
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = 3 * [inputs["prompt"]]
# forward
a = pipe(**lowerCamelCase_ )
a = output.images[0, -3:, -3:, -1]
a = self.get_dummy_inputs()
a = 3 * [inputs.pop("prompt" )]
a = pipe.tokenizer(
lowerCamelCase_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="np" , )
a = text_inputs["input_ids"]
a = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
a = prompt_embeds
# forward
a = pipe(**lowerCamelCase_ )
a = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs()
a = 3 * ["this is a negative prompt"]
a = negative_prompt
a = 3 * [inputs["prompt"]]
# forward
a = pipe(**lowerCamelCase_ )
a = output.images[0, -3:, -3:, -1]
a = self.get_dummy_inputs()
a = 3 * [inputs.pop("prompt" )]
a = []
for p in [prompt, negative_prompt]:
a = pipe.tokenizer(
lowerCamelCase_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="np" , )
a = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
a , a = embeds
# forward
a = pipe(**lowerCamelCase_ )
a = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
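
# A condensed sketch of the prompt-embeds escape hatch the two tests above
# exercise: tokenize + encode once and pass embeddings instead of strings,
# which is useful for batching or caching prompts. `pipe` is assumed to be an
# OnnxStableDiffusionPipeline as constructed in the tests.
def _encode_prompt(pipe, prompts):
    text_inputs = pipe.tokenizer(
        prompts, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", )
    # the ONNX text encoder takes int32 ids and returns the prompt embeddings
    return pipe.text_encoder(input_ids=text_inputs["input_ids"].astype(np.int32))[0]
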
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
@property
    def gpu_provider(self):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCamelCase_ (self ):
"""simple docstring"""
a = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = "A painting of a squirrel eating a burger"
np.random.seed(0 )
a = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
a = output.images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
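

# A few hand-checked examples for the function above (added for illustration,
# not part of the original file): -5 in 4-bit two's complement is 1011.
if __name__ == "__main__":
    assert twos_complement(0) == "0b0"
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-5) == "0b1011"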
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
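

# A minimal usage sketch of the custom-timesteps API exercised by the tests
# above (assumes `diffusers` is installed; not part of the original test file):
if __name__ == "__main__":
    demo_scheduler = DDPMScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    print(demo_scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])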
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of the inputs differs between the question-answering /
        # sequence-classification heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
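

# A minimal usage sketch for the config class above (for illustration only;
# this module uses relative imports, so the equivalent standalone usage goes
# through the installed `transformers` package):
if __name__ == "__main__":
    from transformers import LayoutLMv3Config as _DemoConfig

    demo_config = _DemoConfig()
    print(demo_config.model_type)                  # "layoutlmv3"
    print(demo_config.max_2d_position_embeddings)  # 1024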
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
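

# A hand-checked demo of the greedy routine above (an illustrative example,
# not from the original file): items (value, weight) = (60, 10), (100, 20),
# (120, 30) with capacity 50 yield 240, taking 2/3 of the last item.
if __name__ == "__main__":
    best, taken = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert best == 240.0
    assert taken == [1, 1, 2 / 3]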
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility, forward everything to the current
        # processor while inside the `as_target_processor` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
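

# A minimal usage sketch (for illustration; the checkpoint id is an
# assumption, and this module is normally imported via the `transformers`
# package rather than run directly):
if __name__ == "__main__":
    from transformers import Speech2TextProcessor as _DemoProcessor

    demo_processor = _DemoProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    demo_inputs = demo_processor(audio=[0.0] * 16000, sampling_rate=16000, return_tensors="pt")
    print(demo_inputs.input_features.shape)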
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from __future__ import annotations


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of data into per-column lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 inverts the score, weight 1 keeps it."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Weighted normalization of data; appends each row's total score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
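

# A small hand-checked demo of the scoring pipeline above (values are an
# illustration, not from the original file): weight 0 rewards small values
# (e.g. price), weight 1 rewards large ones (e.g. mileage, year).
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 1, 1]))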
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter in direct form I."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the filter coefficients; `a_coeffs` may omit the leading 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
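

# A tiny hand-checked demo of the class above (an illustrative sketch, not
# from the original file): a 1st-order filter with b = [0.5, 0.5] and
# a = [1.0, 0.0] is a two-tap moving average of the current and previous input.
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    out = [filt.process(x) for x in [1.0, 1.0, 0.0, 0.0]]
    print(out)  # [0.5, 1.0, 0.5, 0.0]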
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
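

# Hand-checked examples for the function above (added for illustration, not
# from the original file): 12 = 4 + 4 + 4 -> 3 squares; 13 = 4 + 9 -> 2 squares.
if __name__ == "__main__":
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(13) == 2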
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
lowercase__ : Optional[int] = tmp_path / '''cache'''
lowercase__ : List[Any] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Tuple = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
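# Hedged usage sketch outside the test suite (the "data.txt" path is a
# placeholder): TextDatasetReader wraps the "text" loader, one example per line.
#     from datasets.io.text import TextDatasetReader
#     ds = TextDatasetReader("data.txt", cache_dir="/tmp/cache").read()
#     print(ds.column_names)  # -> ["text"]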
| 364
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __A ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image, given either a shortest-edge spec or an explicit height/width."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
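# Hedged usage sketch (not part of the original module): "__A" is the class
# name as defined above; a random 8-bit image stands in for real data.
if __name__ == "__main__":
    processor = __A()
    dummy = np.random.randint(0, 256, size=(3, 256, 256), dtype=np.uint8)
    batch = processor.preprocess(dummy)
    print(batch["pixel_values"][0].shape)  # (3, 224, 224) after resize + center crop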
| 302
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
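# Hedged usage sketch: instantiating the config above with its defaults.
if __name__ == "__main__":
    config = EfficientNetConfig()
    print(config.model_type, config.num_hidden_layers)  # efficientnet 64 (sum of repeats * 4)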
| 218
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCAmelCase : Optional[Any] = False
class VersatileDiffusionImageVariationPipelineFastTests( unittest.TestCase ):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_image_variations(self) -> None:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 218
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """simple docstring"""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput( BaseOutput ):
    """simple docstring"""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler( FlaxSchedulerMixin , ConfigMixin ):
    """simple docstring"""

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
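# Hedged usage sketch (not in the original file): build only the noise
# schedule; running the sampler itself would need a trained model.
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    print(state.schedule)  # geometric ramp from sigma_max down to sigma_min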
| 336
|
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
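# Hedged worked example (checked by hand, not from the original module): the
# two-pointer triplet_sum2 is O(n^2) versus the O(n^3) brute force above.
#   triplet_sum1([1, 2, 3, 4, 5], 9) == (1, 3, 5)
#   triplet_sum2([1, 2, 3, 4, 5], 9) == (1, 3, 5)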
| 336
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig( OnnxSeq2SeqConfigWithPast ):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
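# Hedged usage sketch: the defaults above describe a small demo-sized model.
if __name__ == "__main__":
    config = WhisperConfig()
    print(config.model_type, config.d_model, config.encoder_layers)  # whisper 256 6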
| 78
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_0_0_0) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
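# Hedged worked example (Project Euler 25; value checked by hand):
#   solution(3) == 12   # F(12) = 144 is the first Fibonacci number with 3 digits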
| 33
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest( ImageProcessingSavingTestMixin, unittest.TestCase ):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 209
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
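# Hedged note (not in the original tests): map_nested applies a function
# through arbitrarily nested lists/dicts, so on any backend
#   map_nested(add_one, {"a": [1, 2]}) == {"a": [2, 3]}
# The joblibspark backend only changes where the workers run.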
| 209
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    """Read a label file into a {line_number: label} dict."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
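# Hedged CLI sketch (file/paths are placeholders, not from the original script):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned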
| 342
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self) -> None:
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 342
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    '''simple docstring'''

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    '''simple docstring'''
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    '''simple docstring'''
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[Any]:
    """Breadth-first traversal: visit nodes level by level, left to right."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    """Collect the values of one level, scanning left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    """Collect the values of one level, scanning right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[Sequence[Any]]:
    """Alternate the scan direction on every level, starting left to right."""
    if root is None:
        return []

    output: list[Sequence[Any]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
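
# A quick hand-check of the traversals above (hedged: the values follow from
# the sample tree built by make_tree()):
#   inorder(make_tree())   -> [4, 2, 5, 1, 3]
#   preorder(make_tree())  -> [1, 2, 4, 5, 3]
#   postorder(make_tree()) -> [4, 5, 2, 3, 1]
#   zigzag(make_tree())    -> [[1], [3, 2], [4, 5]]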
| 192
|
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
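
# Worked example from the Project Euler statement: for n = 10 the sum of the
# squares is 385 and the square of the sum is 55**2 = 3025, so
# solution(10) == 3025 - 385 == 2640.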
if __name__ == "__main__":
print(F"""{solution() = }""")
| 192
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
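
# Minimal usage sketch (hedged: the checkpoint name and image file below are
# illustrative assumptions, not part of this module):
#
#   from PIL import Image
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["a photo"], images=Image.open("photo.png"), return_tensors="pt")
#   # -> a BatchEncoding carrying input_ids, attention_mask and pixel_values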
| 16
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
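
# Round-trip sketch (hedged: the key below is only an example -- any integer
# matrix whose determinant is coprime with 36 works; det([[2, 5], [1, 6]]) == 7):
#
#   cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   secret = cipher.encrypt("testing hill cipher")
#   cipher.decrypt(secret)  # recovers the padded, upper-cased plaintext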
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 302
| 0
|
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # Target attribute paths follow the diffusers conversion script for this pipeline.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    # Attribute layout reconstructed from the flattened assignments; it follows
    # the T5FilmDecoder module layout used by diffusers' conversion script.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)

    model.continuous_inputs_projection.weight = nn.Parameter(torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))

    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
| 358
|
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 341
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # all arguments are stored on self.config by register_to_config
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
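
# Usage sketch for one sampler iteration (hedged: `sample`, `sigma`,
# `sigma_prev`, `model_output` and `key` are placeholders supplied by the
# surrounding sampling loop):
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#   out = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)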
| 336
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
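
# For example: strtobool("YES") == 1, strtobool("off") == 0, and
# strtobool("maybe") raises ValueError.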
def is_tensor(x):
    """Test whether `x` is a torch / TF / jax tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Test whether `x` is a numpy array."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Test whether `x` is a torch tensor; safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert TF / torch / jax tensors and numpy arrays to nested python lists."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert TF / torch / jax tensors and python containers to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for model outputs: a dataclass that also behaves like an ordered, write-once mapping."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not None."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments accepted by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
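
# For example, flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}}) returns
# {"a": 1, "b.c": 2, "b.d.e": 3}.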
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose`."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic number of elements in `array`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
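
# The helpers above dispatch on the tensor's framework; with plain numpy (a
# minimal sketch) they behave exactly like their numpy counterparts:
#   x = np.zeros((2, 3))
#   transpose(x).shape        # (3, 2)
#   reshape(x, (3, 2)).shape  # (3, 2)
#   expand_dims(x, 0).shape   # (1, 2, 3)
#   tensor_size(x)            # 6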
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of `auto_map` with `repo_id`."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infer the framework (pt / tf / flax) from a model class, robust to inheritance."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 336
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer, backed by HuggingFace's *tokenizers* library (SentencePiece BPE model)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
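
# Special-token layout produced by the methods above (hedged sketch): a single
# sequence is encoded as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`,
# with token type ids that are all zeros in both cases.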
| 370
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    # Build an lm_head whose weights are tied to the embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 277
| 0
|
def solution(n: int = 100) -> int:
    """Project Euler 6 via an explicit loop: (sum of 1..n)**2 minus the sum of squares."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 209
|
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of `plain_text`."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
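
# Reference check (hedged: value taken from the widely cited Adler-32
# example): adler32("Wikipedia") == 300286872 == 0x11E60398.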
| 209
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target; assumes `nums` is sorted ascending."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
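
# The two-pointer scan relies on `nums` being sorted: it runs in O(n) time and
# O(1) extra space, e.g. two_pointer([2, 7, 11, 15], 9) -> [0, 1].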
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 231
|
def solution(length: int = 50) -> int:
    """Count the fillings of a row of `length` units with red blocks of minimum
    length 3 separated by at least one black square (Project Euler 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
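
# Sanity check from the problem statement: a row of seven units admits exactly
# seventeen arrangements, i.e. solution(7) == 17.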
if __name__ == "__main__":
print(F'''{solution() = }''')
| 231
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 192
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer, backed by HuggingFace's *tokenizers* library, with a custom Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        # Make sure the custom Jieba PreTokenizer is set (the slow Bert one is
        # only used as a picklable fallback in __getstate__ / save_pretrained).
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom PreTokenizer cannot be pickled; fall back to the Bert one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 192
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer: a SentencePiece model plus a reduced monolingual vocabulary file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(token)} \n')

        return out_vocab_file, out_monolingual_vocab_file
| 361
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_lowerCamelCase : Optional[int]) -> Dict:
    '''simple docstring'''
    return EnvironmentCommand()
def download_command_factory(args : Dict) -> Dict:
    '''simple docstring'''
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand( BaseTransformersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser :ArgumentParser ) -> str:
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run( self ) -> Dict:
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors" ) is not None:
            import safetensors
            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                "\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f'\t{accelerate_config}'
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU" ) )
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f'{safetensors_version}',
            "Accelerate version": f'{accelerate_version}',
            "Accelerate config": f'{accelerate_config_str}',
            "PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
            "Tensorflow version (GPU?)": f'{tf_version} ({tf_cuda_available})',
            "Flax version (CPU?/GPU?/TPU?)": f'{flax_version} ({jax_backend})',
            "Jax version": f'{jax_version}',
            "JaxLib version": f'{jaxlib_version}',
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ) -> int:
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
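# Example (hedged sketch): this command is normally dispatched through
# `transformers-cli env`; constructing it directly, as below, bypasses the argparse
# wiring and is shown only for illustration.
if __name__ == "__main__":
    EnvironmentCommand(None ).run()  # prints the environment report and returns it as a dict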
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet : UNetaDModel , scheduler : KarrasVeScheduler ) -> Dict:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 50 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['derivative'] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
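# Example (hedged sketch): sampling with the pipeline above. It mirrors diffusers'
# KarrasVePipeline; the UNet checkpoint name is an illustrative assumption.
if __name__ == "__main__":
    from diffusers import UNet2DModel
    unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256" )  # assumed checkpoint
    pipe = KarrasVePipeline(unet=unet , scheduler=KarrasVeScheduler() )
    image = pipe(batch_size=1 , num_inference_steps=20 ).images[0]
    image.save("karras_ve_sample.png" )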
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaVaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "deberta-v2"
    def __init__(self , vocab_size=128100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ) -> List[str]:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig( OnnxConfig ):
    '''simple docstring'''
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
    def default_onnx_opset(self ) -> int:
        return 12
    def generate_dummy_inputs(self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
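# Example (hedged sketch): instantiating the config defined above with a non-default
# `pos_att_type` to show the backwards-compatibility string splitting.
if __name__ == "__main__":
    config = DebertaVaConfig(pos_att_type="p2c|c2p" )
    print(config.pos_att_type )  # ['p2c', 'c2p'] after the split
    print(config.model_type , config.hidden_size )  # deberta-v2 1536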
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@slow
    def test_inference_image_classification( self ) -> int:
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('nielsr/rvlcdip-demo' )
        image = dataset['train'][0]['image'].convert('RGB' )
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
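# Example (hedged sketch): mapping the logits checked above to a human-readable label;
# the checkpoint is the one used in the test, the rest is standard transformers API.
if __name__ == "__main__":
    import torch
    from transformers import AutoImageProcessor, AutoModelForImageClassification
    from datasets import load_dataset
    processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
    model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
    image = load_dataset("nielsr/rvlcdip-demo" )["train"][0]["image"].convert("RGB" )
    with torch.no_grad():
        logits = model(**processor(image , return_tensors="pt" ) ).logits
    print(model.config.id2label[logits.argmax(-1 ).item()] )  # one of the 16 RVL-CDIP classes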
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ) -> Optional[Any]:
    '''simple docstring'''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , sys_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
        logger.info(
            'Number of resulting singleton clusters in the key '
            F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
    if not keep_singletons:
        logger.info(
            F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            'files, respectively' )
    return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ) -> Union[str, Any]:
    '''simple docstring'''
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
        logger.info(
            name.ljust(1_0 ) , F'''Recall: {recall * 1_0_0:.2f}''' , F''' Precision: {precision * 1_0_0:.2f}''' , F''' F1: {fa * 1_0_0:.2f}''' , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_0_0
        logger.info(F'''CoNLL score: {conll:.2f}''' )
        output_scores.update({'conll_score': conll} )
    return output_scores
def check_gold_parse_annotation( key_lines ) -> Dict:
    '''simple docstring'''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
    def _info( self ) -> List[Any]:
        """simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ) -> str:
        """simple docstring"""
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
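# Example (hedged sketch): how `conll_score` in the dict returned by evaluate() above
# relates to the individual F1 values; the numbers are illustrative.
if __name__ == "__main__":
    example_fas = {"muc/f1": 1.0, "bcub/f1": 1.0, "ceafe/f1": 1.0}
    conll_score = sum(example_fas.values() ) / 3 * 100
    print(conll_score )  # 100.0, matching the docstring example above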
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> Optional[Any]:
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_b = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_b]} )
    config.vocab_size += 2
    print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''r''' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            state_dict_for_hugging_face[f'''luke.{key}'''] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    state_dict = state_dict_for_hugging_face
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            f''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ) -> Tuple:
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            entity_name = f'''{language}:{entity_name}'''
            new_mapping[entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
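# Example (hedged sketch): CLI invocation of the conversion script above; every path
# below is a placeholder, not taken from the original file.
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base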
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None ):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , F'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        if "fc2" and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.' )
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm' )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['model']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('.bin' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata , index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
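# Example (hedged sketch): inspecting the index written by shard_on_the_fly above;
# the folder path reuses the script default and assumes the conversion already ran.
if __name__ == "__main__":
    with open(os.path.join("/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b" , WEIGHTS_INDEX_NAME ) ) as f:
        index = json.load(f )
    print(index["metadata"]["total_size"] )           # total checkpoint size in bytes
    print(len(set(index["weight_map"].values() ) ) )  # number of shard files written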
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ) -> Optional[int]:
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
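# Example (hedged sketch): MAUVE can also be scored from precomputed feature matrices,
# skipping the GPT-2 featurization step; the random features below are purely
# illustrative, not meaningful text embeddings.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(0 )
    p_features = rng.normal(size=(64, 16) )  # stand-in for machine-text features
    q_features = rng.normal(size=(64, 16) )  # stand-in for human-text features
    out = compute_mauve(p_features=p_features , q_features=q_features )
    print(out.mauve )  # a value in (0, 1]; higher means the two distributions are closer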
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> List[str]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> List[str]:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> Dict:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ) -> str:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> Dict:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
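# Example (hedged sketch): standard processor usage; the facebook/flava-full checkpoint
# name is an assumption for illustration and the image is a dummy array.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    processor = FlavaProcessor.from_pretrained("facebook/flava-full" )
    image = Image.fromarray(np.zeros((224, 224, 3) , dtype=np.uint8 ) )  # dummy black image
    inputs = processor(images=image , text=["a photo of a cat"] , return_tensors="pt" , padding=True )
    print(sorted(inputs.keys() ) )  # e.g. attention_mask, input_ids, pixel_values, ...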
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    def check_results_dict_not_empty( self , results ) -> Optional[int]:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs( self ) -> Tuple:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self ) -> List[str]:
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript( self ) -> List[Any]:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , torchscript=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
    def test_inference_fp16( self ) -> Any:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures( self ) -> List[str]:
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ) -> List[str]:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
    def test_train_no_configs_fp16( self ) -> Union[str, Any]:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs( self ) -> Tuple:
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs( self ) -> int:
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs( self ) -> int:
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs( self ) -> int:
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files( self ) -> List[str]:
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , "inf_time.csv" ) , train_memory_csv_file=os.path.join(tmp_dir , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(tmp_dir , "inf_mem.csv" ) , train_time_csv_file=os.path.join(tmp_dir , "train_time.csv" ) , env_info_csv_file=os.path.join(tmp_dir , "env.csv" ) , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , "inf_time.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , "train_time.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , "inf_mem.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , "train_mem.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , "env.csv" ) ).exists() )
    def test_trace_memory_line_by_line( self ) -> Union[str, Any]:
        MODEL_ID = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , "sequential" ) )
            self.assertTrue(hasattr(summary , "cumulative" ) )
            self.assertTrue(hasattr(summary , "current" ) )
            self.assertTrue(hasattr(summary , "total" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , "log.txt" ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , "log.txt" ) ).exists() )
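# Example (hedged sketch): running the benchmark utilities directly, outside unittest;
# the model id and settings mirror the tests above.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
    results = PyTorchBenchmark(args ).run()
    print(results.time_inference_result )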
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets( datasets : List[DatasetType] , probabilities : Optional[List[float]] = None , seed : Optional[int] = None , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , stopping_strategy : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def lowerCamelCase__ ( __lowerCAmelCase : List[DatasetType] , __lowerCAmelCase : Optional[DatasetInfo] = None , __lowerCAmelCase : Optional[NamedSplit] = None , __lowerCAmelCase : int = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(__lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(__lowerCAmelCase )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__lowerCAmelCase ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCAmelCase ).__name__}.""" )
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ = (
(Dataset, IterableDataset) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , axis=__lowerCAmelCase )
else:
return _concatenate_iterable_datasets(__lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , axis=__lowerCAmelCase )
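# Hedged usage sketch (added for illustration; upstream these two helpers are
# published as `interleave_datasets` and `concatenate_datasets` in `datasets` --
# here both carry the obfuscated name `lowerCamelCase__`, so the second
# definition shadows the first and the sketch is kept in comment form):
#
#   from datasets import Dataset, interleave_datasets, concatenate_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
#                               stopping_strategy="first_exhausted")
#   joined = concatenate_datasets([d1, d2])  # len(joined) == 6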
| 231
| 1
|
import csv
import tweepy
# Twitter API credentials
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
def UpperCamelCase_( snake_case__: str ) -> None:
# authorize twitter, initialize tweepy
UpperCAmelCase__ = tweepy.OAuthHandler(_UpperCAmelCase , _UpperCAmelCase )
auth.set_access_token(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = tweepy.API(_UpperCAmelCase )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase__ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase__ = api.user_timeline(screen_name=_UpperCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# save the id of the oldest tweet less one
UpperCAmelCase__ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_UpperCAmelCase ) > 0:
print(f"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase__ = api.user_timeline(
screen_name=_UpperCAmelCase , count=2_00 , max_id=_UpperCAmelCase )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# update the id of the oldest tweet less one
UpperCAmelCase__ = alltweets[-1].id - 1
print(f"...{len(_UpperCAmelCase )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase__ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"new_{screen_name}_tweets.csv" , 'w' ) as f:
UpperCAmelCase__ = csv.writer(_UpperCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(_UpperCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 350
|
class lowercase : # Public class to implement a graph
'''simple docstring'''
def __init__(self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = row
UpperCAmelCase__ = col
UpperCAmelCase__ = graph
def UpperCamelCase__ (self , __a , __a , __a ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ (self , __a , __a , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __a )
def UpperCamelCase__ (self ) -> int: # And finally, count all islands.
"""simple docstring"""
UpperCAmelCase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__a , __a , __a )
count += 1
return count
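# Hedged usage sketch (added for illustration; the class above is the classic
# 8-directional count-islands flood fill -- `is_safe` and `diffs` are referenced
# inside it, and the counting method's upstream name, `count_islands`, is an
# assumption). Kept as comments because the obfuscation collapses the method names:
#
#   graph = [
#       [1, 1, 0, 0, 0],
#       [0, 1, 0, 0, 1],
#       [1, 0, 0, 1, 1],
#       [0, 0, 0, 0, 0],
#       [1, 0, 1, 0, 1],
#   ]
#   g = Graph(row=5, col=5, graph=graph)  # class name assumed
#   assert g.count_islands() == 5         # 8-connectivity yields 5 islands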
| 335
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase_ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
lowerCAmelCase_ = '''path-to-your-trained-model'''
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase_ = pipe.to(device)
# to channels last
lowerCAmelCase_ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase_ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase_ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase_ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase_ = torch.randn(2, 4, 64, 64)
lowerCAmelCase_ = torch.rand(1) * 9_99
lowerCAmelCase_ = torch.randn(2, 77, 7_68)
lowerCAmelCase_ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase_ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase_ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase_ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase_ = 6_66
lowerCAmelCase_ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase_ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase_ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase_ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 8
|
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Optional[Union[str, Path]] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : Optional[Dict] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = True
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Optional[Union[str, bool]] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : Optional[Dict] = None
UpperCAmelCase_ : Optional[str] = None
def UpperCAmelCase_ ( self : Tuple ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(lowercase_ ) for k, v in self.__dict__.items()} )
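# Hedged usage sketch (added for illustration): judging by the
# `-> "DownloadConfig"` annotation above, this dataclass mirrors `datasets`'
# `DownloadConfig`, whose `copy()` deep-copies every field so a caller can
# tweak one download without mutating a shared config:
#
#   base = DownloadConfig(cache_dir="/tmp/hf-cache")   # field names assumed
#   per_call = base.copy()
#   per_call.force_download = True                     # `base` is untouched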
| 151
| 0
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 305
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def __A ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305
| 1
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 185
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Dict = ['image_processor']
lowerCamelCase : str = 'SamImageProcessor'
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.image_processor
__lowerCamelCase : str = -10
__lowerCamelCase : List[str] = self.image_processor.size['longest_edge']
def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding:
__lowerCamelCase : Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # pop arguments that are not used in the forward pass but are used nevertheless
__lowerCamelCase : List[Any] = encoding_image_processor['original_sizes']
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ): # Checks if Torch or TF tensor
__lowerCamelCase : Dict = original_sizes.numpy()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Tuple = self._normalize_and_convert(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , )
return encoding_image_processor
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="pt" , ) -> Optional[int]:
if input_points is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] ) for point in input_points
]
else:
__lowerCamelCase : List[Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for point, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCamelCase , __lowerCamelCase : Tuple = self._pad_points_and_labels(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_labels is not None:
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box in input_boxes
]
else:
__lowerCamelCase : Optional[int] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCamelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
__lowerCamelCase : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
__lowerCamelCase : str = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCamelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCamelCase : Dict = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Tuple = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCamelCase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCamelCase : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : List[str] = max([point.shape[0] for point in input_points] )
__lowerCamelCase : Union[str, Any] = []
for i, point in enumerate(SCREAMING_SNAKE_CASE_ ):
if point.shape[0] != expected_nb_points:
__lowerCamelCase : Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__lowerCamelCase : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = processed_input_points
return input_points, input_labels
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> np.ndarray:
__lowerCamelCase , __lowerCamelCase : Tuple = original_size
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE_ , longest_edge=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = deepcopy(SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
if is_bounding_box:
__lowerCamelCase : Optional[int] = coords.reshape(-1 , 2 , 2 )
__lowerCamelCase : List[Any] = coords[..., 0] * (new_w / old_w)
__lowerCamelCase : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCamelCase : Tuple = coords.reshape(-1 , 4 )
return coords
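    # Worked example for the rescaling above (added for clarity, values
    # illustrative): for an original size (h, w) = (1200, 1800) resized so the
    # longest edge is 1024, `_get_preprocess_shape` yields (683, 1024); a point
    # (x=900, y=600) then maps to (900 * 1024 / 1800, 600 * 683 / 1200)
    # = (512.0, 341.5).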
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> Optional[Any]:
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ): # Checks for TF or Torch tensor
__lowerCamelCase : List[str] = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Input points must be a list of list of floating points.' )
__lowerCamelCase : str = [np.array(SCREAMING_SNAKE_CASE_ ) for input_point in input_points]
else:
__lowerCamelCase : Optional[Any] = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ):
__lowerCamelCase : Union[str, Any] = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE_ ):
                raise ValueError('Input labels must be a list of lists of integers.' )
__lowerCamelCase : Any = [np.array(SCREAMING_SNAKE_CASE_ ) for label in input_labels]
else:
__lowerCamelCase : Dict = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ):
__lowerCamelCase : int = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE_ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
__lowerCamelCase : List[Any] = [np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCamelCase : Tuple = None
return input_points, input_labels, input_boxes
@property
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE_ ) )
def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
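# Hedged usage sketch (added for illustration; the `'SamImageProcessor'`
# attribute above indicates this is SAM's processor, published upstream as
# `SamProcessor`). Kept as comments since the obfuscated class is not callable
# by its real name here:
#
#   from PIL import Image
#   from transformers import SamProcessor
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   raw_image = Image.open("truck.jpg")                     # any RGB image
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#   # -> pixel_values plus normalized, batched input_points / input_labels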
| 185
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Optional[Any] = '''blip_text_model'''
def __init__( self , lowercase=3_0524 , lowercase=768 , lowercase=768 , lowercase=3072 , lowercase=768 , lowercase=12 , lowercase=8 , lowercase=512 , lowercase="gelu" , lowercase=1e-12 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=3_0522 , lowercase=2 , lowercase=0 , lowercase=102 , lowercase=True , lowercase=True , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , sep_token_id=lowercase , **lowercase , )
a__ : Any = vocab_size
a__ : Tuple = hidden_size
a__ : Optional[int] = encoder_hidden_size
a__ : Tuple = intermediate_size
a__ : Union[str, Any] = projection_dim
a__ : str = hidden_dropout_prob
a__ : List[Any] = num_hidden_layers
a__ : Optional[int] = num_attention_heads
a__ : Union[str, Any] = max_position_embeddings
a__ : Any = layer_norm_eps
a__ : str = hidden_act
a__ : Any = initializer_range
a__ : Any = attention_probs_dropout_prob
a__ : Optional[int] = is_decoder
a__ : Any = use_cache
@classmethod
def __lowercase ( cls , lowercase , **lowercase) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase)
a__ , a__ : Dict = cls.get_config_dict(lowercase , **lowercase)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
a__ : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase , **lowercase)
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Optional[Any] = '''blip_vision_model'''
def __init__( self , lowercase=768 , lowercase=3072 , lowercase=512 , lowercase=12 , lowercase=12 , lowercase=384 , lowercase=16 , lowercase="gelu" , lowercase=1e-5 , lowercase=0.0 , lowercase=1e-10 , **lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowercase)
a__ : List[str] = hidden_size
a__ : Tuple = intermediate_size
a__ : List[Any] = projection_dim
a__ : Optional[int] = num_hidden_layers
a__ : int = num_attention_heads
a__ : int = patch_size
a__ : Optional[int] = image_size
a__ : Any = initializer_range
a__ : Union[str, Any] = attention_dropout
a__ : Optional[Any] = layer_norm_eps
a__ : Tuple = hidden_act
@classmethod
def __lowercase ( cls , lowercase , **lowercase) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase)
a__ , a__ : Optional[int] = cls.get_config_dict(lowercase , **lowercase)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
a__ : int = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase , **lowercase)
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Any = '''blip'''
__A : Union[str, Any] = True
def __init__( self , lowercase=None , lowercase=None , lowercase=512 , lowercase=2.65_92 , lowercase=256 , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(**lowercase)
if text_config is None:
a__ : Dict = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
if vision_config is None:
a__ : List[str] = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
a__ : List[str] = BlipTextConfig(**lowercase)
a__ : List[str] = BlipVisionConfig(**lowercase)
a__ : Any = self.vision_config.hidden_size
a__ : Tuple = projection_dim
a__ : Union[str, Any] = logit_scale_init_value
a__ : Tuple = 1.0
a__ : Dict = 0.02
a__ : Dict = image_text_hidden_size
@classmethod
def __lowercase ( cls , lowercase , lowercase , **lowercase) -> int:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = copy.deepcopy(self.__dict__)
a__ : Optional[Any] = self.text_config.to_dict()
a__ : Dict = self.vision_config.to_dict()
a__ : str = self.__class__.model_type
return output
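# Hedged usage sketch (added for illustration; upstream these three classes are
# `BlipTextConfig`, `BlipVisionConfig`, and `BlipConfig` -- here all share the
# obfuscated name `A__`, so the sketch uses the upstream names in comments):
#
#   from transformers import BlipTextConfig, BlipVisionConfig, BlipConfig
#   text_cfg = BlipTextConfig(vocab_size=30524)
#   vision_cfg = BlipVisionConfig(image_size=384)
#   cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.text_config.vocab_size == 30524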
| 225
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def A_ ( A__ ) -> Optional[int]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(A__ , '_dynamo' ):
return False
return isinstance(A__ , torch._dynamo.eval_frame.OptimizedModule )
def A_ ( A__ , A__ = True ) -> int:
a__ : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
a__ : Union[str, Any] = is_compiled_module(A__ )
if is_compiled:
a__ : List[str] = model
a__ : Dict = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A__ , A__ ):
a__ : str = model.module
if not keep_fpaa_wrapper:
a__ : Union[str, Any] = getattr(A__ , 'forward' )
a__ : List[Any] = model.__dict__.pop('_original_forward' , A__ )
if original_forward is not None:
while hasattr(A__ , '__wrapped__' ):
a__ : int = forward.__wrapped__
if forward == original_forward:
break
a__ : List[Any] = forward
if getattr(A__ , '_converted_to_transformer_engine' , A__ ):
convert_model(A__ , to_transformer_engine=A__ )
if is_compiled:
a__ : List[str] = model
a__ : Any = compiled_model
return model
def A_ ( ) -> int:
PartialState().wait_for_everyone()
def A_ ( A__ , A__ ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A__ , A__ )
elif PartialState().local_process_index == 0:
torch.save(A__ , A__ )
@contextmanager
def A_ ( **A__ ) -> Any:
for key, value in kwargs.items():
a__ : Optional[int] = str(A__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def A_ ( A__ ) -> List[str]:
if not hasattr(A__ , '__qualname__' ) and not hasattr(A__ , '__name__' ):
a__ : Dict = getattr(A__ , '__class__' , A__ )
if hasattr(A__ , '__qualname__' ):
return obj.__qualname__
if hasattr(A__ , '__name__' ):
return obj.__name__
return str(A__ )
def A_ ( A__ , A__ ) -> Dict:
for key, value in source.items():
if isinstance(A__ , A__ ):
a__ : Optional[Any] = destination.setdefault(A__ , {} )
merge_dicts(A__ , A__ )
else:
a__ : Optional[int] = value
return destination
def A_ ( A__ = None ) -> bool:
if port is None:
a__ : List[Any] = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
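# Hedged usage sketch (added for illustration; upstream this helper is
# accelerate's `is_port_in_use`, defaulting to 29500, torch.distributed's
# standard rendezvous port):
#
#   if is_port_in_use(29500):   # name assumed from upstream
#       print("Port 29500 is taken; pass a different --main_process_port.")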
| 225
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = torch.device('''cpu''')
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
"""simple docstring"""
UpperCamelCase :Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase :Any = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase :List[Any] = dct.pop(__magic_name__ )
UpperCamelCase :Optional[Any] = val
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase :Union[str, Any] = []
for k in state_dict.keys():
UpperCamelCase :int = k
if ".pwconv" in k:
UpperCamelCase :Any = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCamelCase :List[Any] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCamelCase :Tuple = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCamelCase :Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCamelCase :Tuple = k_new.split(""".""" )
if ls[2].isdigit():
UpperCamelCase :Optional[int] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCamelCase :Optional[int] = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase :Dict = SwiftFormerConfig()
    # ImageNet-1k label mapping for the classification head
UpperCamelCase :str = 1000
UpperCamelCase :Optional[int] = """huggingface/label-files"""
UpperCamelCase :int = """imagenet-1k-id2label.json"""
UpperCamelCase :List[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase :Optional[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCamelCase :Any = idalabel
UpperCamelCase :Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCamelCase :List[Any] = [3, 3, 6, 4]
UpperCamelCase :Union[str, Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCamelCase :Optional[Any] = [3, 3, 9, 6]
UpperCamelCase :Dict = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCamelCase :Optional[int] = [4, 3, 10, 5]
UpperCamelCase :str = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCamelCase :Any = [4, 4, 12, 6]
UpperCamelCase :Any = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
UpperCamelCase :Optional[Any] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="""cpu""" , check_hash=__magic_name__ )
else:
UpperCamelCase :List[str] = torch.load(__magic_name__ , map_location="""cpu""" )
UpperCamelCase :Optional[int] = checkpoint
UpperCamelCase :Optional[int] = create_rename_keys(__magic_name__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
# load HuggingFace model
UpperCamelCase :Tuple = SwiftFormerForImageClassification(__magic_name__ ).eval()
hf_model.load_state_dict(__magic_name__ )
# prepare test inputs
UpperCamelCase :List[Any] = prepare_img()
UpperCamelCase :int = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
UpperCamelCase :int = processor(images=__magic_name__ , return_tensors="""pt""" )
# compare outputs from both models
UpperCamelCase :str = get_expected_output(__magic_name__ )
UpperCamelCase :Optional[int] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __magic_name__ , atol=1E-3 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 38
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : List[Any] , __lowerCamelCase : Callable , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[dict] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ):
super().__init__(
features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase :Union[str, Any] = Generator(
cache_dir=__lowerCamelCase , features=__lowerCamelCase , generator=__lowerCamelCase , gen_kwargs=__lowerCamelCase , **__lowerCamelCase , )
def _A ( self : List[str] ):
# Build iterable dataset
if self.streaming:
UpperCamelCase :Any = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
UpperCamelCase :Tuple = None
UpperCamelCase :Dict = None
UpperCamelCase :Dict = None
UpperCamelCase :List[str] = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
UpperCamelCase :Tuple = self.builder.as_dataset(
split="""train""" , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
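# Hedged usage sketch (added for illustration; upstream this reader backs
# `Dataset.from_generator`, which wraps the builder pattern above):
#
#   from datasets import Dataset
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#   ds = Dataset.from_generator(gen)
#   assert len(ds) == 3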
| 38
| 1
|
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> list:
SCREAMING_SNAKE_CASE__ : int = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in range(len(__lowerCAmelCase ) - pat_len + 1 ):
SCREAMING_SNAKE_CASE__ : List[Any] = True
for j in range(__lowerCAmelCase ):
if s[i + j] != pattern[j]:
SCREAMING_SNAKE_CASE__ : Tuple = False
break
if match_found:
position.append(__lowerCAmelCase )
return position
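# Complexity note (added for clarity): the scan above tries every alignment of
# the pattern against the text, i.e. O((n - m + 1) * m) character comparisons
# in the worst case with n = len(s) and m = len(pattern); e.g. for s = "AAAA"
# and pattern = "AAB", each of the 2 alignments compares 3 characters before
# failing.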
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 56
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a :Optional[Any] = logging.get_logger(__name__)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[str]:
# Recurse if needed
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : List[Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : Any = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put it on {device}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : List[Any] = False
else:
SCREAMING_SNAKE_CASE__ : str = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE__ : str = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE__ : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Tuple = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = value.to("""cpu""" )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ : str = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(__lowerCAmelCase , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCAmelCase ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE__ : str = bnb.nn.IntaParams(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
elif is_abit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb.nn.Paramsabit(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(__lowerCAmelCase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : str = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : List[str] = value.to(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(__lowerCAmelCase , device=__lowerCAmelCase )
if is_buffer:
SCREAMING_SNAKE_CASE__ : List[str] = new_value
else:
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(__lowerCAmelCase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : Dict = new_value
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False ) -> List[Any]:
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
current_key_name.append(__lowerCAmelCase )
if (isinstance(__lowerCAmelCase , nn.Linear ) or isinstance(__lowerCAmelCase , __lowerCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(__lowerCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ : str = module.in_features
SCREAMING_SNAKE_CASE__ : Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.LinearabitLt(
__lowerCAmelCase , __lowerCAmelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE__ : Tuple = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = bnb.nn.Linearabit(
__lowerCAmelCase , __lowerCAmelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE__ : int = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ : Dict = type(__lowerCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCAmelCase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = _replace_with_bnb_linear(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_been_replaced=__lowerCAmelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> str:
SCREAMING_SNAKE_CASE__ : int = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = _replace_with_bnb_linear(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , __lowerCAmelCase , )
return replace_with_bnb_linear(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , __lowerCAmelCase , )
return set_module_quantized_tensor_to_device(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Tuple:
    SCREAMING_SNAKE_CASE__ : List[Any] = deepcopy(__lowerCAmelCase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ : List[str] = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = sum(__lowerCAmelCase , [] )
SCREAMING_SNAKE_CASE__ : str = len(__lowerCAmelCase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = not hasattr(__lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : int = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : str = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : Any = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Any = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace(__lowerCAmelCase , """""" )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
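# Hedged usage sketch (added for illustration; the deprecation warnings above
# name the public entry points `replace_with_bnb_linear` and
# `set_module_quantized_tensor_to_device`):
#
#   from transformers import BitsAndBytesConfig
#   cfg = BitsAndBytesConfig(load_in_8bit=True)
#   model = replace_with_bnb_linear(model, modules_to_not_convert=["lm_head"],
#                                   quantization_config=cfg)
#   # eligible nn.Linear modules are now bnb.nn.Linear8bitLt (built under
#   # init_empty_weights, so weights still need to be loaded onto a device)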
| 56
| 1
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Optional[int] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any ) ->None:
"""simple docstring"""
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 0
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 335
| 0
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__magic_name__: Tuple = "http://www.mocksite.com/file1.txt"
__magic_name__: int = "\"text\": [\"foo\", \"foo\"]"
__magic_name__: Any = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class snake_case__ :
lowercase__ : List[Any] = 200
lowercase__ : Optional[int] = {'''Content-Length''': '''100'''}
lowercase__ : Dict = {}
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Union[str, Any]:
return [bytes(lowerCAmelCase__ , """utf-8""" )]
def UpperCamelCase ( *_A, **_A ):
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""", [str, list, dict] )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
import requests
monkeypatch.setattr(_A, """request""", _A )
__magic_name__ : Union[str, Any] = URL
if issubclass(_A, _A ):
__magic_name__ : Optional[int] = url
elif issubclass(_A, _A ):
__magic_name__ : str = [url]
elif issubclass(_A, _A ):
__magic_name__ : List[str] = {"""train""": url}
__magic_name__ : Optional[int] = """dummy"""
__magic_name__ : Tuple = """downloads"""
__magic_name__ : Dict = tmp_path
__magic_name__ : Any = DownloadConfig(
cache_dir=os.path.join(_A, _A ), use_etag=_A, )
__magic_name__ : List[Any] = DownloadManager(dataset_name=_A, download_config=_A )
__magic_name__ : List[str] = dl_manager.download(_A )
__magic_name__ : str = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_A, _A ):
__magic_name__ : str = [downloaded_paths]
__magic_name__ : Optional[int] = [urls]
elif isinstance(_A, _A ):
assert "train" in downloaded_paths.keys()
__magic_name__ : Union[str, Any] = downloaded_paths.values()
__magic_name__ : str = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_A, _A ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__magic_name__ : List[Any] = Path(_A )
__magic_name__ : Tuple = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__magic_name__ : Tuple = downloaded_path.read_text()
assert content == CONTENT
__magic_name__ : Optional[Any] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__magic_name__ : Any = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""", [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    """simple docstring"""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=True, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """simple docstring"""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""", ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def test_iter_archive_path(archive_jsonl, request):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""", ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def test_iter_archive_file(archive_nested_jsonl, request):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
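# A minimal sketch (not part of the test suite) of the cache layout the
# assertions above encode: a download for `url` lands at
# <cache_dir>/downloads/<hash_url_to_filename(url, etag)>, which is exactly
# what the `parts[-1] == HASH` and `parts[-2] == cache_subdir` checks verify.
def expected_download_path(cache_dir, url, etag=None):
    return os.path.join(cache_dir, "downloads", hash_url_to_filename(url, etag=etag))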
| 352
|
__magic_name__: List[Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__: Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__: Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138
| 0
|
A : Tuple = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : List[Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 305
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'''{attribute} is initialized.''')
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'''{attribute} is initialized''')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''')
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f'''{key} was not correctly initialized!''')
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
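# Hedged illustration of the q/k/v split performed in the converter above: a
# fused `in_proj_weight` of shape (3 * embed_dim, embed_dim) is sliced into
# three (embed_dim, embed_dim) projections. Standalone sketch, not part of the
# conversion script itself.
import torch

def split_in_proj(in_proj_weight: torch.Tensor):
    embed_dim = in_proj_weight.shape[0] // 3
    query_w = in_proj_weight[:embed_dim, :]
    key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value_w = in_proj_weight[2 * embed_dim :, :]
    return query_w, key_w, value_w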
| 305
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
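# For reference, a toy version of the lazy-import pattern used above (an
# illustration, independent of transformers' actual `_LazyModule`): attribute
# access triggers the real submodule import on first use.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._tiny_import_structure = import_structure

    def __getattr__(self, attr):
        # only called for attributes that are not yet set on the module
        for submodule, attrs in self._tiny_import_structure.items():
            if attr in attrs:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")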
| 365
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 5_00]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20, 4_00) - 0.1_47_71)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80, 9_60) - 0.3_24_60)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87, 4_86) - 0.0_09_79)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99, 9_98) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 11_47.79_04) < 1e-2
        assert abs(result_mean.item() - 0.49_82) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_72.00_67) < 1e-2
        assert abs(result_mean.item() - 0.22_39_67) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.53_02) < 1e-2
        assert abs(result_mean.item() - 0.06_84) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_49.82_95) < 1e-2
        assert abs(result_mean.item() - 0.19_51) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_49.07_84) < 1e-2
        assert abs(result_mean.item() - 0.19_41) < 1e-3
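# The batched test above flattens the (parallel, batch) axes into one before
# calling `batch_step_no_noise`. A small shape illustration (assumed toy
# values, not scheduler API):
import torch
parallel, batch = 3, 2
samples = torch.randn(parallel, batch, 3, 8, 8)
timesteps = torch.arange(10)[:parallel, None].repeat(1, batch)  # (parallel, batch)
assert samples.flatten(0, 1).shape[0] == timesteps.flatten(0, 1).shape[0] == parallel * batch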
| 158
| 0
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def UpperCAmelCase_ ( __UpperCAmelCase : str = "dhaka" , __UpperCAmelCase : int = 5 ) -> int:
SCREAMING_SNAKE_CASE_ = min(__UpperCAmelCase , 50 ) # Prevent abuse!
SCREAMING_SNAKE_CASE_ = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
SCREAMING_SNAKE_CASE_ = requests.get('https://www.google.com/search' , params=__UpperCAmelCase , headers=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = BeautifulSoup(html.text , 'html.parser' )
SCREAMING_SNAKE_CASE_ = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
SCREAMING_SNAKE_CASE_ = json.dumps(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = json.loads(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , __UpperCAmelCase , )
if not matched_google_image_data:
return 0
SCREAMING_SNAKE_CASE_ = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(__UpperCAmelCase ) , )
SCREAMING_SNAKE_CASE_ = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , __UpperCAmelCase , )
for index, fixed_full_res_image in enumerate(__UpperCAmelCase ):
if index >= max_images:
return index
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'ascii' ).decode(
'unicode-escape' )
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'ascii' ).decode(
'unicode-escape' )
SCREAMING_SNAKE_CASE_ = urllib.request.build_opener()
SCREAMING_SNAKE_CASE_ = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = f"query_{query.replace(' ' , '_' )}"
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
urllib.request.urlretrieve( # noqa: S310
__UpperCAmelCase , f"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 225
|
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
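# Worked example for the monotonic-stack version above: scanning right-to-left
# over [2, 7, 3, 5, 4] keeps a decreasing stack, giving [7, -1, 5, -1, -1].
assert next_greatest_element([2, 7, 3, 5, 4]) == [7, -1, 5, -1, -1]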
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 225
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file ,'w' ) as fp:
            fp.write('\n'.join(merges ) )
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
return "lower newer", "lower newer"
def __lowercase ( self : Any ):
'''simple docstring'''
        tokenizer = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
def __lowercase ( self : Union[str, Any] ,_a : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : int = self.rust_tokenizer_class.from_pretrained(_a ,**_a )
# Simple input
_a : Dict = 'This is a simple input'
_a : str = ['This is a simple input 1', 'This is a simple input 2']
_a : str = ('This is a simple input', 'This is a pair')
_a : Union[str, Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_a ,tokenizer_r.encode ,_a ,max_length=_a ,padding='max_length' )
# Simple input
self.assertRaises(_a ,tokenizer_r.encode_plus ,_a ,max_length=_a ,padding='max_length' )
# Simple input
self.assertRaises(
_a ,tokenizer_r.batch_encode_plus ,_a ,max_length=_a ,padding='max_length' ,)
# Pair input
self.assertRaises(_a ,tokenizer_r.encode ,_a ,max_length=_a ,padding='max_length' )
# Pair input
self.assertRaises(_a ,tokenizer_r.encode_plus ,_a ,max_length=_a ,padding='max_length' )
# Pair input
self.assertRaises(
_a ,tokenizer_r.batch_encode_plus ,_a ,max_length=_a ,padding='max_length' ,)
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
pass
| 5
|
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]):
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
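# Example for the implementation above: the system x + 2y = 3 and 2x + y = 3
# has determinant 1 * 1 - 2 * 2 = -3 and the unique solution (1.0, 1.0).
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)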
| 5
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ) -> None:
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ) -> None:
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got '{format_type}'" )
| 56
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 56
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = CustomTokenizer
pass
| 90
|
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
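# Worked example for the row reduction above: this 2x3 matrix has two linearly
# independent rows, so the computed rank is 2.
assert rank_of_matrix([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) == 2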
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "mctct"
    def __init__( self, vocab_size=80_65, hidden_size=15_36, num_hidden_layers=36, intermediate_size=61_44, num_attention_heads=4, attention_head_dim=3_84, max_position_embeddings=9_20, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 335
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : Optional[Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 138
| 0
|
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
    return allocation_list
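# Example for the allocator above: 100 bytes over 4 partitions gives 25 bytes
# per range, with the last range running to the final byte.
assert allocation_num(100, 4) == ['1-25', '26-50', '51-75', '76-100']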
if __name__ == "__main__":
import doctest
doctest.testmod()
| 212
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_A = logging.get_logger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : Union[str, Any] , *UpperCamelCase : int , **UpperCamelCase : List[Any] ) -> None:
"""simple docstring"""
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 212
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=UpperCAmelCase__ ):
__UpperCamelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self :str , *SCREAMING_SNAKE_CASE :Dict , **SCREAMING_SNAKE_CASE :Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :str , *SCREAMING_SNAKE_CASE :List[Any] , **SCREAMING_SNAKE_CASE :List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Optional[int] , *SCREAMING_SNAKE_CASE :Optional[Any] , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A__ ( metaclass=UpperCAmelCase__ ):
__UpperCamelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self :Optional[Any] , *SCREAMING_SNAKE_CASE :List[str] , **SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :List[Any] , *SCREAMING_SNAKE_CASE :List[str] , **SCREAMING_SNAKE_CASE :List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :List[str] , *SCREAMING_SNAKE_CASE :Any , **SCREAMING_SNAKE_CASE :List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A__ ( metaclass=UpperCAmelCase__ ):
__UpperCamelCase : Tuple = ["torch", "transformers", "onnx"]
def __init__( self :Any , *SCREAMING_SNAKE_CASE :Tuple , **SCREAMING_SNAKE_CASE :List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :List[Any] , *SCREAMING_SNAKE_CASE :Optional[int] , **SCREAMING_SNAKE_CASE :int ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Tuple , *SCREAMING_SNAKE_CASE :Any , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A__ ( metaclass=UpperCAmelCase__ ):
__UpperCamelCase : Any = ["torch", "transformers", "onnx"]
def __init__( self :Tuple , *SCREAMING_SNAKE_CASE :Union[str, Any] , **SCREAMING_SNAKE_CASE :List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Any , *SCREAMING_SNAKE_CASE :Union[str, Any] , **SCREAMING_SNAKE_CASE :List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Optional[Any] , *SCREAMING_SNAKE_CASE :Optional[int] , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A__ ( metaclass=UpperCAmelCase__ ):
__UpperCamelCase : List[Any] = ["torch", "transformers", "onnx"]
def __init__( self :Optional[int] , *SCREAMING_SNAKE_CASE :int , **SCREAMING_SNAKE_CASE :Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :int , *SCREAMING_SNAKE_CASE :List[str] , **SCREAMING_SNAKE_CASE :Any ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :List[Any] , *SCREAMING_SNAKE_CASE :Dict , **SCREAMING_SNAKE_CASE :List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A__ ( metaclass=UpperCAmelCase__ ):
__UpperCamelCase : str = ["torch", "transformers", "onnx"]
def __init__( self :Optional[int] , *SCREAMING_SNAKE_CASE :str , **SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :List[str] , *SCREAMING_SNAKE_CASE :Union[str, Any] , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Dict , *SCREAMING_SNAKE_CASE :List[Any] , **SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 276
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
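# Why stepping by 6 suffices in the loop above: every prime p > 3 satisfies
# p % 6 in {1, 5}, since the other residues are divisible by 2 or 3.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))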
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self ) -> str:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _snake_case ( self ) -> List[Any]:
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 158
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
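# For comparison, a single-process odd-even transposition pass (a sketch, not
# part of the original script): it performs the same alternating compare-swap
# phases that the pipe-based version distributes across processes.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr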
| 160
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
        simple_backend = find_backend('    if not is_torch_available():' )
        self.assertEqual(simple_backend , 'torch' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend('    if not (is_torch_available() and is_transformers_available()):' )
        self.assertEqual(double_backend , 'torch_and_transformers' )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            '    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(triple_backend , 'torch_and_transformers_and_onnx' )
def lowerCamelCase ( self ):
"""simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('torch_and_transformers' , objects )
        self.assertIn('flax_and_transformers' , objects )
        self.assertIn('torch_and_transformers_and_onnx' , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
def lowerCamelCase ( self ):
"""simple docstring"""
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )
        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n' )
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )
def lowerCamelCase ( self ):
"""simple docstring"""
        test_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , test_file )
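# Hedged sketch of the backend-name extraction these tests exercise: a regex
# pulls the `xxx` out of `is_xxx_available()` guards (ordering details of the
# real `find_backend` aside).
import re
_re_backend_sketch = re.compile(r'is\_([a-z_]*)_available\(\)')

def find_backend_sketch(line):
    backends = _re_backend_sketch.findall(line)
    return '_and_'.join(backends) if backends else None

assert find_backend_sketch('    if not (is_torch_available() and is_transformers_available()):') == 'torch_and_transformers'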
| 160
| 1
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def __A (self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def __A (self , UpperCAmelCase ) -> List[Any]:
return "lower newer", "lower newer"
def __A (self ) -> Dict:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def __A (self , UpperCAmelCase=1_5 ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowercase =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# Simple input
_lowercase ='''This is a simple input'''
_lowercase =['''This is a simple input 1''', '''This is a simple input 2''']
_lowercase =('''This is a simple input''', '''This is a pair''')
_lowercase =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
def __A (self ) -> str:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowerCamelCase__ ( lowerCAmelCase):
pass
| 5
|
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Matrix:
"""simple docstring"""
_lowercase =len(__snake_case )
_lowercase =[[0 for _ in range(size + 1 )] for _ in range(__snake_case )]
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
for row in range(__snake_case ):
for col in range(__snake_case ):
_lowercase =matrix[row][col]
_lowercase =vector[row][0]
_lowercase =0
_lowercase =0
while row < size and col < size:
# pivoting
_lowercase =max((abs(augmented[rowa][col] ), rowa) for rowa in range(__snake_case , __snake_case ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowercase , _lowercase =augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __snake_case ):
_lowercase =augmented[rowa][col] / augmented[row][col]
_lowercase =0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __snake_case ):
for row in range(__snake_case ):
_lowercase =augmented[row][col] / augmented[col][col]
for cola in range(__snake_case , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__snake_case )
]
def UpperCAmelCase_ ( __snake_case ) -> Callable[[int], int]:
"""simple docstring"""
_lowercase =len(__snake_case )
_lowercase =[[0 for _ in range(__snake_case )] for _ in range(__snake_case )]
_lowercase =[[0] for _ in range(__snake_case )]
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
for x_val, y_val in enumerate(__snake_case ):
for col in range(__snake_case ):
_lowercase =(x_val + 1) ** (size - col - 1)
_lowercase =y_val
_lowercase =solve(__snake_case , __snake_case )
def interpolated_func(__snake_case ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__snake_case ) )
return interpolated_func
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase_ ( __snake_case = question_function , __snake_case = 10 ) -> int:
"""simple docstring"""
_lowercase =[func(__snake_case ) for x_val in range(1 , order + 1 )]
_lowercase =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowercase =0
_lowercase =42
_lowercase =42
for poly in polynomials:
_lowercase =1
while func(__snake_case ) == poly(__snake_case ):
x_val += 1
ret += poly(__snake_case )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5
| 1
|
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
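# Added sanity check (a sketch, not in the original script): n = 4 has exactly
# two solutions. `solution` is a module-level accumulator, so it must be
# cleared before a second run.
solution.clear()
solve([[0 for _ in range(4)] for _ in range(4)], 0)
print('Solutions for n = 4:', len(solution))  # expected: 2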
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
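# Added illustration (an assumed usage pattern, not from this module): a pin
# table like `deps` is typically consumed by joining its values into pip
# requirement strings, e.g.
#
#     install_requires = [deps[pkg] for pkg in ("torch", "numpy", "Pillow")]
#     # -> ['torch>=1.4', 'numpy', 'Pillow']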
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
return config
    def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained('microsoft/deberta-base')

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'{output[:, 1:4, 1:4]}')
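# Added usage sketch (illustrative, not part of the test suite): the slow
# integration test above boils down to a forward pass like this one.
#
#     from transformers import AutoTokenizer, DebertaModel
#     tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
#     model = DebertaModel.from_pretrained("microsoft/deberta-base")
#     inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
#     last_hidden = model(**inputs).last_hidden_state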
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
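# Added usage sketch (illustrative, not from this module): the auto classes
# above dispatch on a checkpoint's config type, e.g.
#
#     from transformers import FlaxAutoModel
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel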
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
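# Added illustration (a sketch): `rename_key` rewrites indexed module paths so
# PyTorch's `layers.0` style matches Flax's `layers_0` naming, e.g.
#
#     rename_key("down_blocks.0.attentions.1.proj.weight")
#     # -> 'down_blocks_0.attentions_1.proj.weight'
#
# while `rename_key_and_reshape_tensor` transposes Linear weights, since Flax
# stores them as `kernel` with the opposite (in, out) layout.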
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}'
        )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}'
        )

    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively'
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1})

        logger.info(
            name.ljust(10),
            f'Recall: {recall * 100:.2f}',
            f' Precision: {precision * 100:.2f}',
            f' F1: {f1 * 100:.2f}',
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'CoNLL score: {conll:.2f}')
        output_scores.update({'conll_score': conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    """CoVal coreference evaluation metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Sequence(datasets.Value('string')),
                }
            ),
            codebase_urls=['https://github.com/ns-moosavi/coval'],
            reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
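# Added note (a sketch): `conll_score` is only reported when all three of MUC,
# B-cubed and CEAFe were computed, since it is their F1 average scaled to 100:
#
#     results = coval.compute(predictions=predictions, references=references)
#     conll = (results['muc/f1'] + results['bcub/f1'] + results['ceafe/f1']) / 3 * 100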
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '▁eloquent')
        self.assertEqual(len(vocab_keys), 30_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁this', '▁is', '▁a', '▁test'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.']
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
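# Added note (a sketch): the '▁' prefix in the expected tokens above is the
# SentencePiece word-boundary marker, e.g.
#
#     AlbertTokenizer(SAMPLE_VOCAB).tokenize('this is a test')
#     # -> ['▁this', '▁is', '▁a', '▁test']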
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: 0-weight edges are relaxed from the front of the deque, 1-weight from the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
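    # Added usage sketch (not in the original module): a 0-1 BFS gives
    # Dijkstra-like shortest paths in O(V + E) when all weights are 0 or 1.
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    assert graph.get_shortest_path(0, 2) == 1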
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Affine hypothesis h(x) = theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
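# Added note (a sketch): the model being fitted above is the affine hypothesis
#     h(x) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_3
# with parameter_vector = [theta_0, theta_1, theta_2, theta_3]; e.g. for the
# initial vector [2, 4, 1, 5]:
#
#     _hypothesis_value((5, 2, 3))  # -> 2 + 4*5 + 1*2 + 5*3 == 39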
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
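
    # Sanity check of the accumulation arithmetic (illustrative numbers): a
    # requested batch_size of 64 with MAX_GPU_BATCH_SIZE = 16 yields
    # gradient_accumulation_steps = 64 // 16 = 4 micro-batches of 16, so the
    # effective batch size seen by the optimizer stays at 4 * 16 = 64.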
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore every cell and track the best square side seen so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, but each (row, col) subproblem is memoized and solved only once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP that keeps only the current and the next row."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
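    # A slightly larger, hand-checked example (added for illustration): the
    # largest all-ones squares in this matrix are 2x2, so both variants below
    # should print 2.
    example_matrix = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    print(largest_square_area_in_matrix_top_down(3, 3, example_matrix))
    print(largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, example_matrix))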
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
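
    # Hedged usage sketch: the coordinates below are illustrative (roughly San
    # Francisco and Yosemite, in decimal degrees). For points this close the
    # result should agree with the haversine distance (~254 km) to within meters.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))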
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=yes is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps a single temporary directory for the whole class and wipes it between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create a temporary directory for the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove `cls.tmpdir` after the test suite has finished."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        """Register mocks that should be started for each test and stopped on cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
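
# Minimal usage sketch (hypothetical command and script name): run a script
# under torchrun and fail loudly if any worker exits non-zero.
#
# >>> cmd = ["torchrun", "--nproc_per_node=2", "my_test_script.py"]
# >>> execute_subprocess_async(cmd, env=os.environ.copy())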
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run `command` with `subprocess.check_output`, optionally returning decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
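
# Typical invocations routed through this dispatcher (illustrative):
#
#     accelerate config
#     accelerate env
#     accelerate launch train.py --batch_size 16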
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
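
    # Worked example of the mapping above (hypothetical parameter names): a
    # checkpoint with blocks "1a", "1b", "2a" yields block_name_mapping
    # {"1a": "0", "1b": "1", "2a": "2"}, i.e. TF block ids map to consecutive
    # HF block indices in sorted order.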
    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_loading_old_tokenizer_raises(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
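
# Note on the toy vocabulary above (illustrative walk-through): "lower"
# tokenizes to ["lo", "w", "er</w>"] because the merge table only contains
# "l o", "lo w</w>" and "e r</w>"; BPE can build "lo" and the final "er</w>"
# but has no merge that attaches the middle "w".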
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
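
# Minimal usage sketch (values are the defaults above, shown for illustration):
#
# >>> config = XmodConfig(vocab_size=30522, default_language="en_XX")
# >>> onnx_config = XmodOnnxConfig(config, task="default")
# >>> onnx_config.inputs
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])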
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
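
# Illustrative use of the table above: STR_OPERATION_TO_FUNC[">="] is
# `operator.ge`, so a caller can turn a spec like ("torch", ">=", "1.12.0")
# into STR_OPERATION_TO_FUNC[">="](installed_version, required_version) after
# parsing both version strings (the variable names here are hypothetical).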
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# Descriptive constant name: distributed modes usable with XPU back ends.
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
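
# Shape of the parsed vocabulary (the line below is a hypothetical example): a
# vocab.txt line such as "ハロー,ハロ-,halo" becomes one shared id, so `vocab`
# maps every surface form to that id, `raw_vocab` keeps the comma-joined line
# as the canonical key, and `ids_to_tokens` maps the id back to the variants.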
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
def snake_case_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase = 0
if os.path.isdir(_snake_case ):
UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
UpperCAmelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
UpperCAmelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase = token_index
writer.write(''','''.join(_snake_case ) + '''\n''' )
index += 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class lowercase ( A__ ):
'''simple docstring'''
def __init__( self , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = vocab # same as swe
UpperCAmelCase = ids_to_tokens # same as bpe
UpperCAmelCase = emoji
UpperCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] )
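        # the six patterns below match, in order: URLs, e-mail addresses, phone
        # numbers, western-style dates, Japanese-era dates, and prices;
        # clean_text() replaces each match with the corresponding special token
        # (<URL>, <EMAIL>, <TEL>, <DATE>, <DATE>, <PRICE>)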
UpperCAmelCase = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
UpperCAmelCase = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
UpperCAmelCase = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
UpperCAmelCase = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
UpperCAmelCase = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
UpperCAmelCase = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
UpperCAmelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
UpperCAmelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
UpperCAmelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.ids_to_tokens )
def snake_case_ ( self , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = self.content_repattera.sub('''<URL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<EMAIL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<TEL>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<DATE>''' , _snake_case )
UpperCAmelCase = self.content_repattera.sub('''<PRICE>''' , _snake_case )
UpperCAmelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def snake_case_ ( self , _snake_case , _snake_case=False ) -> str:
"""simple docstring"""
UpperCAmelCase = text.replace(''' ''' , '''<SP>''' )
        UpperCAmelCase = text.replace('''　''' , '''<SP>''' )  # full-width space
UpperCAmelCase = text.replace('''\r\n''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\n''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\r''' , '''<BR>''' )
UpperCAmelCase = text.replace('''\t''' , '''<TAB>''' )
UpperCAmelCase = text.replace('''—''' , '''ー''' )
UpperCAmelCase = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase = text.replace(_snake_case , _snake_case )
if clean:
UpperCAmelCase = self.clean_text(_snake_case )
def check_simbol(_snake_case ):
UpperCAmelCase = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 2:
UpperCAmelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(_snake_case ):
UpperCAmelCase = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 3:
UpperCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28080 and c <= 0XE2B07F:
return True
return False
UpperCAmelCase = 0
UpperCAmelCase = []
while pos < len(_snake_case ):
UpperCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
UpperCAmelCase = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
UpperCAmelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
UpperCAmelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
result.append(_snake_case )
UpperCAmelCase = e
else:
UpperCAmelCase = pos + 1
UpperCAmelCase = text[pos:end]
if check_simbol(_snake_case ):
result.append('''<KIGOU>''' )
elif checkuae(_snake_case ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
UpperCAmelCase = end
return result
def snake_case_ ( self , _snake_case , _snake_case="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = ''''''.join(_snake_case )
return text
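if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original file). It assumes
    # the renamed tokenizer class above is transformers'
    # GPTNeoXJapaneseTokenizer, which the vocab/emoji URL map at the top of
    # this row points to.
    from transformers import GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    ids = tokenizer("こんにちは、世界。")["input_ids"]
    print(ids)
    print(tokenizer.decode(ids))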
| 152
| 0
|
from manim import *
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[str]:
lowerCamelCase__ : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : int = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : List[Any] = Text('CPU' , font_size=24 )
lowerCamelCase__ : Union[str, Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(1 )]
lowerCamelCase__ : str = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : int = Text('GPU' , font_size=24 )
lowerCamelCase__ : Optional[Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.align_to(UpperCAmelCase , UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Tuple = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : Dict = Text('Model' , font_size=24 )
lowerCamelCase__ : Dict = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , )
lowerCamelCase__ : Dict = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
lowerCamelCase__ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=2.5 ) , Write(UpperCAmelCase ) , Write(UpperCAmelCase ) )
self.add(UpperCAmelCase )
lowerCamelCase__ : int = []
lowerCamelCase__ : str = []
lowerCamelCase__ : Dict = []
for i, rect in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
cpu_target.move_to(UpperCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ : Dict = 0.4_6 / 4
lowerCamelCase__ : List[str] = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase , buff=0.0 )
cpu_targs.append(UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase ) )
second_animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
self.play(*UpperCAmelCase )
self.play(*UpperCAmelCase )
self.wait()
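# Usage sketch (added): a Manim scene like the one above is rendered from the
# command line, e.g. `manim -pql this_file.py <SceneName>`; the scene's real
# name is not preserved by the renaming in this row, so both names here are
# assumptions.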
| 50
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCAmelCase : int = None
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : List[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = NllbTokenizer
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Any=None , UpperCAmelCase : str="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : List[str]="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ : List[Any] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
lowerCamelCase__ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = vocab_file
lowerCamelCase__ : Dict = False if not self.vocab_file else True
lowerCamelCase__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ : str = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ : int = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ : List[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def A_ ( self : List[Any] , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : Dict = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : int , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ : Optional[int] = src_lang
lowerCamelCase__ : Optional[int] = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tgt_lang_id
return inputs
def A_ ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Dict , ) -> BatchEncoding:
lowerCamelCase__ : Any = src_lang
lowerCamelCase__ : int = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : Any ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : str , UpperCAmelCase : Optional[Any] ) -> None:
lowerCamelCase__ : int = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : int = []
lowerCamelCase__ : str = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : int = [self.cur_lang_code]
lowerCamelCase__ : Tuple = [self.eos_token_id]
lowerCamelCase__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : int , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Union[str, Any] = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : Any = [self.cur_lang_code]
lowerCamelCase__ : Optional[Any] = [self.eos_token_id]
lowerCamelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase__ : int = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
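if __name__ == "__main__":
    # Minimal translation-tokenization sketch (added; not part of the original
    # file). Assumes the renamed class above is transformers'
    # NllbTokenizerFast; the `text_target` argument requires a recent
    # transformers release.
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
    print(batch["input_ids"])  # wrapped with the eng_Latn code per set_src_lang_special_tokens above
    print(batch["labels"])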
| 50
| 1
|
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ = "" , lowerCAmelCase__ = False ) -> None:
'''simple docstring'''
a__ : dict[str, RadixNode] ={}
# A node will be a leaf if the tree contains its word
a__ : Optional[int] =is_leaf
a__ : str =prefix
def _lowercase ( self , lowerCAmelCase__ ) -> tuple[str, str, str]:
'''simple docstring'''
a__ : Optional[int] =0
for q, w in zip(self.prefix , snake_case__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _lowercase ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
for word in words:
self.insert(snake_case__ )
def _lowercase ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
if self.prefix == word:
a__ : List[Any] =True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
a__ : Tuple =RadixNode(prefix=snake_case__ , is_leaf=snake_case__ )
else:
a__ : str =self.nodes[word[0]]
a__ : Optional[Any] =incoming_node.match(
snake_case__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(snake_case__ )
            # Case 4: The node prefix and the word only partially match
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
a__ : Optional[Any] =remaining_prefix
a__ : Optional[Any] =self.nodes[matching_string[0]]
a__ : str =RadixNode(snake_case__ , snake_case__ )
a__ : List[str] =aux_node
if remaining_word == "":
a__ : int =True
else:
self.nodes[matching_string[0]].insert(snake_case__ )
def _lowercase ( self , lowerCAmelCase__ ) -> bool:
'''simple docstring'''
a__ : Optional[int] =self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
a__ : Dict =incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(snake_case__ )
def _lowercase ( self , lowerCAmelCase__ ) -> bool:
'''simple docstring'''
a__ : Tuple =self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
a__ : str =incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(snake_case__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
a__ : List[str] =list(self.nodes.values() )[0]
a__ : int =merging_node.is_leaf
self.prefix += merging_node.prefix
a__ : Tuple =merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
a__ : Tuple =False
# If there is 1 edge, we merge it with its child
else:
a__ : List[str] =list(incoming_node.nodes.values() )[0]
a__ : Any =merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
a__ : Any =merging_node.nodes
return True
def _lowercase ( self , lowerCAmelCase__ = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def _A ( ):
"""simple docstring"""
a__ : Tuple ="banana bananas bandana band apple all beast".split()
a__ : Union[str, Any] =RadixNode()
root.insert_many(__lowerCamelCase )
assert all(root.find(__lowerCamelCase ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def _A ( ):
"""simple docstring"""
assert test_trie()
def _A ( ):
"""simple docstring"""
a__ : List[str] =RadixNode()
a__ : List[Any] ="banana bananas bandanas bandana band apple all beast".split()
root.insert_many(__lowerCamelCase )
print("Words:" , __lowerCamelCase )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
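# Worked example (added): the three-way split performed by match(). Assuming
# the renamed class above is RadixNode -- the name its own insert() uses -- a
# node with prefix "band" matched against the word "banana" shares "ban", so
# match("banana") returns ("ban", "d", "ana"): the common prefix, the leftover
# node prefix, and the leftover word, which is exactly the triple the
# insert/find/delete cases branch on.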
| 360
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[Any] = """mvp"""
_lowercase : Tuple = ["""past_key_values"""]
_lowercase : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase__=5_0_2_6_7 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_6 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.0 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=1_0_0 , lowerCAmelCase__=8_0_0 , **lowerCAmelCase__ , ) -> str:
'''simple docstring'''
a__ : Dict =vocab_size
a__ : List[str] =max_position_embeddings
a__ : List[str] =d_model
a__ : Optional[Any] =encoder_ffn_dim
a__ : Dict =encoder_layers
a__ : List[str] =encoder_attention_heads
a__ : List[str] =decoder_ffn_dim
a__ : Tuple =decoder_layers
a__ : Tuple =decoder_attention_heads
a__ : Any =dropout
a__ : str =attention_dropout
a__ : str =activation_dropout
a__ : Optional[int] =activation_function
a__ : Union[str, Any] =init_std
a__ : Dict =encoder_layerdrop
a__ : List[str] =decoder_layerdrop
a__ : Union[str, Any] =classifier_dropout
a__ : Union[str, Any] =use_cache
a__ : Optional[Any] =encoder_layers
a__ : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
a__ : int =use_prompt
a__ : Union[str, Any] =prompt_length
a__ : Dict =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , lowerCAmelCase__ ):
a__ : Tuple =self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed." )
| 148
| 0
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
lowercase_ : Union[str, Any] = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE , params=__SCREAMING_SNAKE_CASE ).content , '''html.parser''' )
lowercase_ : List[Any] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
lowercase_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
_lowercase : Optional[int] = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 3_0,
"pages": "3979-3990",
"year": 2_0_1_8,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
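# Note (added): anchors[2] depends on Google Scholar's current markup -- the
# citation count is the third link inside the "gs_fl" footer of a result --
# so the scrape fails or returns the wrong link text if that layout changes.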
| 93
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : int ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> str:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Dict , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : Dict , **__UpperCamelCase : List[Any] ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : int , **__UpperCamelCase : int ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : Any , **__UpperCamelCase : List[Any] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Dict ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Tuple , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : str ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[str] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : str ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Dict , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
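# Note (added): every class above is a stub generated by the dummy-object
# pattern -- instantiating one, or calling its from_config/from_pretrained
# classmethods, routes through requires_backends and raises a clear
# "requires flax" error instead of an opaque ImportError when JAX/Flax is
# not installed.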
| 256
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , __a : Union[str, Any] , __a : Dict=13 , __a : Tuple=32 , __a : List[Any]=3 , __a : str=4 , __a : Optional[int]=[10, 20, 30, 40] , __a : str=[2, 2, 3, 2] , __a : Optional[Any]=True , __a : Optional[int]=True , __a : Optional[int]=37 , __a : Dict="gelu" , __a : Union[str, Any]=10 , __a : Tuple=0.02 , __a : int=["stage2", "stage3", "stage4"] , __a : List[Any]=3 , __a : Dict=None , ):
_a = parent
_a = batch_size
_a = image_size
_a = num_channels
_a = num_stages
_a = hidden_sizes
_a = depths
_a = is_training
_a = use_labels
_a = intermediate_size
_a = hidden_act
_a = type_sequence_label_size
_a = initializer_range
_a = out_features
_a = num_labels
_a = scope
_a = num_stages
def UpperCamelCase__ ( self : Any ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self : List[Any] ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCamelCase__ ( self : List[str] ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__a , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def UpperCamelCase__ ( self : str , __a : Dict , __a : List[str] , __a : Optional[Any] ):
_a = UperNetForSemanticSegmentation(config=__a )
model.to(__a )
model.eval()
_a = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.prepare_config_and_inputs()
        _a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(UperNetForSemanticSegmentation,) if is_torch_available() else ()
__a ={'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
__a =False
__a =False
__a =False
__a =False
__a =False
__a =False
def UpperCamelCase__ ( self : int ):
_a = UperNetModelTester(self )
_a = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def UpperCamelCase__ ( self : int ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self : List[str] ):
return
def UpperCamelCase__ ( self : Tuple ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def UpperCamelCase__ ( self : Tuple ):
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def UpperCamelCase__ ( self : str ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def UpperCamelCase__ ( self : str ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def UpperCamelCase__ ( self : List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def UpperCamelCase__ ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase__ ( self : Any ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
def check_hidden_states_output(__a : Dict , __a : Optional[Any] , __a : Dict ):
_a = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__a , __a ) )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(__a , __a , __a )
def UpperCamelCase__ ( self : List[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = _config_zero_init(__a )
_a = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_a = model_class(config=__a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="UperNet does not have tied weights" )
def UpperCamelCase__ ( self : int ):
pass
@slow
def UpperCamelCase__ ( self : List[str] ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = UperNetForSemanticSegmentation.from_pretrained(__a )
self.assertIsNotNone(__a )
def _lowerCamelCase ( ) -> Dict:
_a = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
_a = Image.open(lowercase ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : int ):
_a = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
_a = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__a )
_a = prepare_img()
_a = processor(images=__a , return_tensors="pt" ).to(__a )
with torch.no_grad():
_a = model(**__a )
_a = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __a )
_a = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4 ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
_a = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__a )
_a = prepare_img()
_a = processor(images=__a , return_tensors="pt" ).to(__a )
with torch.no_grad():
_a = model(**__a )
_a = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __a )
_a = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4 ) )
| 346
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
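# Usage sketch (added, as comments since the helper's original name is not
# preserved in this row; in the upstream source it is create_pan_cameras(size)):
#
#   cameras = create_pan_cameras(64)
#   print(cameras.camera_rays.shape)  # (1, 20 * 64 * 64, 2, 3): 20 views, one
#                                     # origin/direction pair per pixel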
| 346
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
|
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Any:
stooge(__lowerCamelCase , 0 , len(__lowerCamelCase ) - 1 )
return arr
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
_snake_case , _snake_case = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        _snake_case = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(__lowerCamelCase , __lowerCamelCase , (h - t) )
# Recursively sort last 2/3 elements
stooge(__lowerCamelCase , i + t , (__lowerCamelCase) )
# Recursively sort first 2/3 elements
stooge(__lowerCamelCase , __lowerCamelCase , (h - t) )
if __name__ == "__main__":
UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
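# Added note: stooge sort recursively sorts the first two thirds, the last two
# thirds, then the first two thirds again, so T(n) = 3 * T(2n/3) + O(1), i.e.
# about O(n**2.71) -- slower than bubble sort and purely of educational value.
# A sketch, assuming the renamed defs above are stooge_sort/stooge as their
# call sites suggest:
#
#   assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]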
| 288
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'markuplm'
def __init__( self : Dict , _A : Union[str, Any]=30_522 , _A : Any=768 , _A : Optional[int]=12 , _A : str=12 , _A : Any=3_072 , _A : int="gelu" , _A : int=0.1 , _A : Optional[int]=0.1 , _A : Dict=512 , _A : Dict=2 , _A : Union[str, Any]=0.0_2 , _A : Dict=1e-12 , _A : Optional[Any]=0 , _A : Tuple=0 , _A : Dict=2 , _A : Any=256 , _A : Tuple=1_024 , _A : Union[str, Any]=216 , _A : str=1_001 , _A : str=32 , _A : Optional[Any]=50 , _A : Tuple="absolute" , _A : str=True , _A : Tuple=None , **_A : Tuple , ):
'''simple docstring'''
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A , )
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : int = type_vocab_size
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Optional[int] = position_embedding_type
UpperCAmelCase__ : int = use_cache
UpperCAmelCase__ : int = classifier_dropout
# additional properties
UpperCAmelCase__ : Union[str, Any] = max_depth
UpperCAmelCase__ : Tuple = max_xpath_tag_unit_embeddings
UpperCAmelCase__ : List[Any] = max_xpath_subs_unit_embeddings
UpperCAmelCase__ : List[str] = tag_pad_id
UpperCAmelCase__ : Union[str, Any] = subs_pad_id
UpperCAmelCase__ : List[Any] = xpath_unit_hidden_size
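if __name__ == "__main__":
    # Minimal sketch (added; not part of the original file). Assumes the class
    # above is transformers' MarkupLMConfig, matching its "markuplm" model_type.
    from transformers import MarkupLMConfig

    config = MarkupLMConfig()
    print(config.hidden_size, config.max_depth, config.xpath_unit_hidden_size)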
| 299
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
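# A hedged usage note (not part of the original script): the example is meant to be
# started through the `accelerate` CLI, and the filename `cross_validation.py` is an
# assumption for illustration.
#
#   accelerate config                                    # answer the setup questions once
#   accelerate launch cross_validation.py --num_folds 3  # optionally add --mixed_precision fp16
#
# The same script then runs unchanged on CPU, single/multi GPU, or TPU.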
| 299
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
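# A short usage sketch (hedged: it relies on the generic `PipelineTool.__call__`
# dispatching to encode/forward/decode, and `photo.png` is an assumed local file):
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(Image.open("photo.png"), "What is in the picture?"))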
| 160
|
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
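# A minimal server counterpart for exercising the client above (a sketch; the served
# filename `send_this_file` is an assumption for illustration):
#
#   import socket
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   print(conn.recv(1024))  # b"Hello server!"
#   with open("send_this_file", "rb") as f:
#       while chunk := f.read(1024):
#           conn.send(chunk)
#   conn.close()  # closing the connection is what ends the client's recv loop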
| 152
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
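# A minimal sanity check for the two helpers above (added for illustration, not part
# of the original file): a tensor in [0, 1] should survive the round trip exactly up
# to the 8-bit quantization applied by `decimal_to_bits`.
def _bit_round_trip_demo():
    img = torch.rand(1, 3, 4, 4)  # values in [0, 1]
    restored = bits_to_decimal(decimal_to_bits(img))
    assert torch.allclose(restored, (img * 255).int() / 255, atol=1e-6)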
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # The bit-space step functions above read `self.bit_scale` with the scheduler
        # as `self`, so mirror the value onto the scheduler and bind the matching step
        # function to it with `__get__`.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
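# A minimal usage sketch (assumes a trained bit-space `unet` is available; the
# checkpoint name is a placeholder, not from the original file):
#
#   unet = UNet2DConditionModel.from_pretrained("<your-bit-diffusion-unet>")
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]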
| 244
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
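# A quick illustration of the derived properties above (a sketch using the default
# Falcon-7B-style hyper-parameters):
def _falcon_config_demo():
    config = FalconConfig()
    assert config.head_dim == 4544 // 71  # hidden_size / num_attention_heads == 64
    assert config.rotary  # rotary embeddings are used whenever alibi=False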
| 244
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
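# Typical usage of this module (a sketch for illustration; it assumes the file lives
# inside a package so `_get_library_name()` resolves to that package's name):
def _logging_demo():
    logger = get_logger(__name__)
    set_verbosity(log_levels["debug"])  # or: export DATASETS_VERBOSITY=debug
    logger.debug("now visible")
    disable_progress_bar()  # subsequent `tqdm(...)` calls return EmptyTqdm no-ops
    enable_progress_bar()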
| 7
|
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
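# Example invocation (all paths are placeholders, not from the original script):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model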
| 148
| 0
|
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file.

    Input: the Image feature accepts a `str` (absolute path), a `dict` with "path"/"bytes" keys,
    an `np.ndarray`, or a `PIL.Image.Image`.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of image dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
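# A short usage sketch (illustrative: `cats.png` is an assumed local file, and the
# `datasets` import refers to the library this module belongs to):
def _image_feature_demo():
    from datasets import Dataset, Features

    ds = Dataset.from_dict({"img": ["cats.png"]}, features=Features({"img": Image()}))
    pil_image = ds[0]["img"]  # decoded via Image.decode_example -> PIL.Image.Image
    ds = ds.cast_column("img", Image(decode=False))
    raw = ds[0]["img"]  # {"bytes": None, "path": "cats.png"}
    return pil_image, raw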
| 359
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the model's weights to the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
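# Example invocation (the script and checkpoint names are placeholders, not from the
# original file):
#
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path /tmp/groupvit \
#       --model_name groupvit-gcc-yfcc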
| 32
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 346
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 346
| 1
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1_, rotor2_, rotor3_ = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1_[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2_[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3_[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3_.index(symbol) - rotorpos3]
            symbol = abc[rotor2_.index(symbol) - rotorpos2]
            symbol = abc[rotor1_.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
_lowercase : Optional[int] ="This is my Python script that emulates the Enigma machine from WWII."
_lowercase : Optional[int] =(1, 1, 1)
_lowercase : Union[str, Any] ="pictures"
_lowercase : str =(rotora, rotora, rotora)
_lowercase : Any =enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
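# A hedged example invocation (the script name and all paths below are placeholders
# for illustration; adjust them to your checkout and checkpoint locations):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./transfo-xl-tf/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl-tf/config.json
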
def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci number with `n` digits."""
    fib1, fib2 = 1, 1
    index = 2
    while True:
        digits = 0
        fib = fib1 + fib2
        fib1, fib2 = fib2, fib
        index += 1
        for _ in str(fib):
            digits += 1
        if digits == n:
            break
    return index
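

# An equivalent, slightly more direct formulation (added illustration): counting
# digits with len(str(...)) instead of the explicit counting loop gives the same
# answer with less bookkeeping.
def solution_len(n: int = 1000) -> int:
    fib1, fib2, index = 1, 1, 2
    while len(str(fib2)) < n:
        fib1, fib2 = fib2, fib1 + fib2
        index += 1
    return index
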
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]) -> List[str]:
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
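# A hedged example of how this script is typically launched (flag values and the
# data layout with train/val/test source+target files are assumptions for
# illustration):
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir ./cnn_dm \
#       --output_dir ./outputs \
#       --do_train --do_predict \
#       --n_val 500 --val_metric rouge2
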
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 5 -> "0b101"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
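    # A few illustrative calls (added examples; expected output in comments):
    print(decimal_to_binary(37))  # 0b100101
    print(decimal_to_binary(-5))  # -0b101
    print(decimal_to_binary(0))  # 0b0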
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
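

# A minimal smoke test of the model above (an added sketch; the tiny hyper-parameters
# are arbitrary choices for a fast CPU run, not values from any released checkpoint):
def _vq_demo() -> None:
    model = VQModel(block_out_channels=(32,), num_vq_embeddings=16, norm_num_groups=8)
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        reconstruction = model(sample).sample
    assert reconstruction.shape == sample.shape
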
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block that bundles conv/norm/activation layers."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales, in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation=1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
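

# A hedged usage sketch (the checkpoint name comes from the archive list at the top
# of this file; running this downloads weights, so it is left as a comment):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width)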
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
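

# A sketch of how the mixin above is consumed by a concrete model test suite (class
# and tester names below are illustrative assumptions, not part of this file):
#
#   @require_flax
#   class FlaxGPT2GenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
#       all_generative_model_classes = (FlaxGPT2LMHeadModel,)
#
#       def setUp(self):
#           self.model_tester = FlaxGPT2ModelTester(self)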
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
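# A hedged example invocation (the fairseq checkpoint path is a placeholder; the
# config name is one plausible choice, not a requirement):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path ./opt-metaseq-125m/restored.pt \
#       --pytorch_dump_folder_path ./opt-125m-pt \
#       --hf_config facebook/opt-125m
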
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
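

# A hedged usage sketch (requires the Tesseract OCR binary when `apply_ocr` is
# enabled; the checkpoint name and file path are assumptions for illustration):
#
#   from PIL import Image
#   from transformers import LayoutLMv3Processor
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   # encoding contains input_ids, bbox, attention_mask and pixel_values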