"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case :
a_ : Union[str, Any] = 42
a_ : Union[str, Any] = None
a_ : List[str] = None
def UpperCamelCase ( ) ->Any:
"""simple docstring"""
a_ = Node(1 )
a_ = Node(2 )
a_ = Node(3 )
a_ = Node(4 )
a_ = Node(5 )
return tree
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ = []
if root is None:
return output
a_ = deque([root] )
while process_queue:
a_ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = []
def populate_output(UpperCAmelCase , UpperCAmelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(UpperCamelCase__ , UpperCamelCase__ )
return output
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
a_ = []
def populate_output(UpperCAmelCase , UpperCAmelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(UpperCamelCase__ , UpperCamelCase__ )
return output
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
if root is None:
return []
a_ = []
a_ = 0
a_ = height(UpperCamelCase__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(UpperCamelCase__ , UpperCamelCase__ ) )
a_ = 1
else:
output.append(get_nodes_from_right_to_left(UpperCamelCase__ , UpperCamelCase__ ) )
a_ = 0
return output
def UpperCamelCase ( ) ->int: # Main function for testing.
"""simple docstring"""
a_ = make_tree()
print(F'''In-order Traversal: {inorder(UpperCamelCase__ )}''' )
print(F'''Pre-order Traversal: {preorder(UpperCamelCase__ )}''' )
print(F'''Post-order Traversal: {postorder(UpperCamelCase__ )}''' , "\n" )
print(F'''Height of Tree: {height(UpperCamelCase__ )}''' , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(UpperCamelCase__ ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(UpperCamelCase__ ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(UpperCamelCase__ , level=UpperCamelCase__ ) )
print("\nZigZag order Traversal: " )
print(zigzag(UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
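
# A quick sanity check of the traversals on the sample tree (expected lists worked
# out by hand from the definitions above, not taken from an external source):
sample = make_tree()
assert preorder(sample) == [1, 2, 4, 5, 3]
assert inorder(sample) == [4, 2, 5, 1, 3]
assert postorder(sample) == [4, 5, 2, 3, 1]
assert level_order(sample) == [1, 2, 3, 4, 5]
assert height(sample) == 3
assert zigzag(sample) == [[1], [3, 2], [4, 5]]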
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
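
# A minimal sketch of what the shim means for callers: old imports keep working but
# emit a FutureWarning on construction (calling the constructor with no arguments is
# an assumption for illustration; any YolosImageProcessor kwargs would do):
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = YolosFeatureExtractor()  # behaves like YolosImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)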
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
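
# A worked example of the derived sizes under the defaults (the arithmetic follows
# directly from __init__ and _number_of_features above, not from external docs):
config = TimeSeriesTransformerConfig(prediction_length=24)
# _number_of_features = sum([0]) + 0 + 0 + 0 + input_size * 2 = 2  (loc/scale features)
# feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9
assert config.feature_size == 9
assert config.context_length == 24  # falls back to prediction_length when unset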
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=32 * 4 , __UpperCAmelCase=32 * 6 , __UpperCAmelCase=4 , __UpperCAmelCase=32 , ) ->Tuple:
a_ = parent
a_ = batch_size
a_ = is_training
a_ = use_auxiliary_loss
a_ = num_queries
a_ = num_channels
a_ = min_size
a_ = max_size
a_ = num_labels
a_ = mask_feature_size
def UpperCAmelCase__ ( self ) ->Dict:
a_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__UpperCAmelCase )
a_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase )
a_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase ) > 0.5
).float()
a_ = (torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase ) > 0.5).long()
a_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self ) ->str:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self ) ->List[str]:
a_ = self.prepare_config_and_inputs()
a_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase ) ->Optional[int]:
a_ = output.encoder_hidden_states
a_ = output.pixel_decoder_hidden_states
a_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) , config.decoder_config.decoder_layers )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) ->int:
with torch.no_grad():
a_ = MaskFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a_ = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase )
a_ = model(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) ->Optional[Any]:
a_ = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
def comm_check_on_output(__UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a_ = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase )
a_ = model(__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
a_ = model(
pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
a_ : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a_ : Union[str, Any] = False
a_ : Tuple = False
a_ : str = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self ) ->Optional[Any]:
a_ = MaskFormerModelTester(self )
a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def UpperCAmelCase__ ( self ) ->List[str]:
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def UpperCAmelCase__ ( self ) ->Optional[Any]:
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def UpperCAmelCase__ ( self ) ->Dict:
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def UpperCAmelCase__ ( self ) ->int:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def UpperCAmelCase__ ( self ) ->int:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase__ ( self ) ->Union[str, Any]:
pass
def UpperCAmelCase__ ( self ) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ) ->Any:
for model_name in ["facebook/maskformer-swin-small-coco"]:
a_ = MaskFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->Tuple:
a_ = (self.model_tester.min_size,) * 2
a_ = {
"pixel_values": torch.randn((2, 3, *size) , device=__UpperCAmelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=__UpperCAmelCase ),
"class_labels": torch.zeros(2 , 10 , device=__UpperCAmelCase ).long(),
}
a_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase )
a_ = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__ ( self ) ->Any:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->str:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
a_ = model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__ ( self ) ->int:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
a_ = self.all_model_classes[1]
a_ = self.model_tester.prepare_config_and_inputs()
a_ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
a_ = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase__ ( self ) ->Any:
a_ = self.all_model_classes[1]
a_ = self.model_tester.prepare_config_and_inputs()
a_ = True
a_ = True
a_ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
a_ = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase )
a_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
a_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase_ = 1E-4
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self ) ->Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def UpperCAmelCase__ ( self ) ->Tuple:
a_ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__UpperCAmelCase )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
a_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
a_ = model(**__UpperCAmelCase )
a_ = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
a_ = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
a_ = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def UpperCAmelCase__ ( self ) ->Optional[Any]:
a_ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__UpperCAmelCase )
.eval()
)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
a_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
a_ = model(**__UpperCAmelCase )
# masks_queries_logits
a_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a_ = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
a_ = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
# class_queries_logits
a_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
a_ = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def UpperCAmelCase__ ( self ) ->Dict:
a_ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(__UpperCAmelCase )
.eval()
)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
a_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
a_ = model(**__UpperCAmelCase )
# masks_queries_logits
a_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a_ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
a_ = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
# class_queries_logits
a_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
a_ = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def UpperCAmelCase__ ( self ) ->Any:
a_ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__UpperCAmelCase )
.eval()
)
a_ = self.default_image_processor
a_ = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
a_ = inputs["pixel_values"].to(__UpperCAmelCase )
a_ = [el.to(__UpperCAmelCase ) for el in inputs["mask_labels"]]
a_ = [el.to(__UpperCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
a_ = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
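
# Beyond checking raw logits, downstream use typically goes through the image
# processor's post-processing. A minimal inference sketch (same checkpoint and
# fixture image as the tests above; run on CPU here for simplicity):
from PIL import Image
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Collapse per-query mask/class logits into a single semantic map at the original size.
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(semantic_map.shape)  # (height, width) tensor of per-pixel class ids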
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) , np.inf )
a_ = 0
a_ = np.empty((rows, cols) , dtype=UpperCAmelCase )
a_ = None
while queue:
((a_) , (a_)) = heappop(UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase , (dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
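
# Example: shortest path on a 3x3 grid with a blocked centre cell (expected
# distance worked out by hand; the exact path may vary with heap tie-breaking):
grid = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
distance, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
assert distance == 4.0
assert path[0] == (0, 0) and path[-1] == (2, 2)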
"""simple docstring"""
class snake_case :
def __init__( self , __UpperCAmelCase) ->List[str]:
a_ = set_counts
a_ = max(lowerCamelCase_)
a_ = len(lowerCamelCase_)
a_ = [1] * num_sets
a_ = list(range(lowerCamelCase_))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = self.get_parent(lowerCamelCase_)
a_ = self.get_parent(lowerCamelCase_)
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
a_ = 0
a_ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
a_ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
a_ = 0
a_ = src_parent
a_ = self.set_counts[src_parent]
a_ = max(self.max_set , lowerCamelCase_)
return True
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
if self.parents[disj_set] == disj_set:
return disj_set
a_ = self.get_parent(self.parents[disj_set])
return self.parents[disj_set]
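
# Example: three sets of sizes 2, 3 and 5; merging tracks the largest set size
# (expected values traced by hand through merge and get_parent above):
ds = DisjointSet([2, 3, 5])
assert ds.merge(0, 1)                        # sets 0 and 1 join into a set of size 5
assert ds.get_parent(0) == ds.get_parent(1)
assert ds.max_set == 5
assert ds.merge(1, 2)                        # everything joined -> size 10
assert ds.max_set == 10
assert not ds.merge(0, 2)                    # already in the same set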
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
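
# A minimal usage sketch: inputs are expected in [-1, 1], NCHW layout, and at
# least 256 pixels wide (the shape and random values below are illustrative;
# requires the invisible-watermark package that provides imwatermark):
watermarker = StableDiffusionXLWatermarker()
images = torch.rand(2, 3, 512, 512) * 2 - 1
marked = watermarker.apply_watermark(images)
assert marked.shape == images.shape  # same shape, watermark bits embedded via DWT-DCT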
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark every test that is not explicitly marked "integration" or "unit" as a unit test.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into the pytest temp directory.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't take tests into account when counting download statistics.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Suppress SQLAlchemy 2.0 deprecation warnings where features are not yet compatible.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
"""simple docstring"""
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( UpperCAmelCase = 20 ) ->str:
"""simple docstring"""
a_ = math.comb(UpperCAmelCase , UpperCAmelCase )
a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase )
a_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
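
# Why the formula works (linearity of expectation): each of the 7 colours contributes
# the probability that at least one of its 10 balls is among the 20 picked, i.e.
#   P(colour present) = 1 - C(60, 20) / C(70, 20)
# so E[#colours] = 7 * (1 - C(60, 20) / C(70, 20)).  A quick consistency check:
import math

expected = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
assert f"{expected:.9f}" == solution(20)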
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCamelCase_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
for attribute in key.split("." ):
a_ = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
a_ = getattr(lowercase_ , lowercase_ ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
a_ = None
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == "group" , )
a_ = True
elif name.split("." )[0] == "proj":
a_ = fairseq_model.proj
a_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(lowercase_ )[0].split("." )[-2]
a_ = mapped_key.replace("*" , lowercase_ )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
a_ = "weight"
else:
a_ = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ , a_ = emb.weight.shape
a_ = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
a_ = emb.weight.data
return lin_layer
def UpperCamelCase ( UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
with open(lowercase_ , "r" , encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [line.split(" " )[0] for line in lines]
a_ = len(lowercase_ )
a_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(lowercase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->Dict:
"""simple docstring"""
a_ = WavaVecaConfig.from_pretrained(lowercase_ )
a_ = SpeechaTextaConfig.from_pretrained(
lowercase_ , vocab_size=lowercase_ , decoder_layers=lowercase_ , do_stable_layer_norm=lowercase_ )
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
a_ = model[0].eval()
# set weights for wav2vec2 encoder
a_ = WavaVecaModel(lowercase_ )
a_ = recursively_load_weights_wavaveca(model.encoder , lowercase_ )
a_ = SpeechaTextaForCausalLM(lowercase_ )
a_ , a_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowercase_ )
# set output linear layer
unexpected_keys.remove("embed_out" )
a_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
a_ = SpeechEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
a_ = False
# add projection layer
a_ = nn.Parameter(projection_layer.weight )
a_ = nn.Parameter(projection_layer.bias )
a_ = create_vocab_dict(lowercase_ )
with open(os.path.join(lowercase_ , "vocab.json" ) , "w" ) as fp:
json.dump(lowercase_ , lowercase_ )
a_ = SpeechaTextaTokenizer(os.path.join(lowercase_ , "vocab.json" ) )
tokenizer.save_pretrained(lowercase_ )
a_ = hf_wavavec.config.to_dict()
a_ = tokenizer.pad_token_id
a_ = tokenizer.bos_token_id
a_ = tokenizer.eos_token_id
a_ = "speech_to_text_2"
a_ = "wav2vec2"
a_ = SpeechEncoderDecoderConfig.from_dict(lowercase_ )
hf_wavavec.save_pretrained(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
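
# A sketch of driving the conversion from Python instead of the CLI (the two local
# paths are placeholders; the config names and sizes are the script's own defaults):
convert_wav2vec2_checkpoint(
    checkpoint_path="/path/to/fairseq/checkpoint_best.pt",
    pytorch_dump_folder_path="./wav2vec2-s2t2-converted",
    dict_path="/path/to/dict.ltr.txt",
    encoder_config_path="facebook/wav2vec2-large-lv60",
    decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
    vocab_size=10224,
    num_decoder_layers=7,
)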
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
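
# Equivalent to the CLI entry point above, driven from Python. The checkpoint path
# follows the default layout used by get_tf_weights_as_numpy; both paths are
# placeholders for illustration:
convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "pegasus/aeslc")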
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class snake_case ( a_ , unittest.TestCase ):
a_ : Union[str, Any] = PegasusTokenizer
a_ : str = PegasusTokenizerFast
a_ : Optional[int] = True
a_ : int = True
def UpperCAmelCase__ ( self) ->int:
super().setUp()
# We have a SentencePiece fixture for testing
a_ = PegasusTokenizer(lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def UpperCAmelCase__ ( self) ->Dict:
return PegasusTokenizer.from_pretrained("google/pegasus-large")
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->int:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[str]:
return ("This is a test", "This is a test")
def UpperCAmelCase__ ( self) ->str:
a_ = '''</s>'''
a_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<pad>")
self.assertEqual(vocab_keys[1] , "</s>")
self.assertEqual(vocab_keys[-1] , "v")
self.assertEqual(len(lowercase_) , 11_03)
def UpperCAmelCase__ ( self) ->Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 11_03)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
a_ = self.tokenizer_class.from_pretrained(self.tmpdirname)
a_ = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
a_ = rust_tokenizer([raw_input_str] , return_tensors=lowercase_ , add_special_tokens=lowercase_).input_ids[0]
a_ = py_tokenizer([raw_input_str] , return_tensors=lowercase_ , add_special_tokens=lowercase_).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
a_ = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
a_ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
a_ = tokenizer([raw_input_str] , return_tensors=lowercase_).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_)
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
a_ = '''To ensure a smooth flow of bank resolutions.'''
a_ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
a_ = tokenizer([raw_input_str] , return_tensors=lowercase_).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = ['''This is going to be way too long.''' * 1_50, '''short example''']
a_ = ['''not super long but more than 5 tokens''', '''tiny''']
a_ = self._large_tokenizer(lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt")
a_ = self._large_tokenizer(
text_target=lowercase_ , max_length=5 , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt")
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(lowercase_) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ ( self) ->str:
# fmt: off
a_ = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
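
# The offset checked in test_large_tokenizer_settings is the key invariant of the
# Pegasus vocab: ids below tokenizer.offset (103) are reserved for special and mask
# tokens, and ordinary pieces live above it. A small spot check reusing ids from
# that test (network access to the checkpoint is assumed):
tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = tok("To ensure a smooth flow of bank resolutions.").input_ids
assert ids == [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
assert all(i == 1 or i > tok.offset for i in ids)  # 1 is </s>; the rest sit above the offset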
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
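    # The slice comparison above is the standard cache-consistency check: feeding the
    # full sequence without past_key_values and feeding only the new tokens together
    # with past_key_values must yield the same hidden states for those tokens, up to
    # numerical tolerance.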
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 303
| 0
|
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
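# Worked example (illustrative): for level=170 the factor is
# (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so mid-gray (128) stays fixed while values away from 128 are pushed roughly
# 4.85x further from it; PIL's point() then clamps results to the 8-bit band range.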
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 359
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    """simple docstring"""
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic


def create_rename_keys(config):
    """simple docstring"""
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
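# Note on the 256-wide slices above: the original DETR checkpoints use a hidden size
# of 256, and PyTorch's MultiheadAttention stores the stacked q/k/v input projection
# as a single (3*256, 256) matrix, so rows [:256], [256:512] and [-256:] correspond
# to the query, key and value projections respectively.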
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
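# Worked example: 4150 is one of the sought numbers, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
# The search can safely stop below 1,000,000: a 7-digit number is at least
# 1,000,000, yet seven fifth powers sum to at most 7 * 9**5 = 413343.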
if __name__ == "__main__":
print(solution())
| 360
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """simple docstring"""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
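# Illustrative trace of rename_key on a typical checkpoint key:
# "layers.0.blocks.1.modulation.f.weight"
#   -> "encoder.layers.0.blocks.1.modulation.f.weight"           ("layers" prefix added)
#   -> "encoder.stages.0.blocks.1.modulation.f.weight"           (encoder.layers -> encoder.stages)
#   -> "encoder.stages.0.layers.1.modulation.f.weight"           (blocks -> layers)
#   -> "encoder.stages.0.layers.1.modulation.projection_in.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"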
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
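    # Why this holds: with identical beta schedules, both schedulers' add_noise applies
    # the same closed form x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
    # and set_seed(0) makes the two freshly created models bitwise-identical, so both
    # training runs see the same inputs and produce the same predictions.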
| 361
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
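# Example usage (a minimal sketch; assumes an ONNX file "model.onnx" exists in the
# working directory — the file name is hypothetical):
#
#     optimized_path = remove_dup_initializers("model.onnx")
#     print("optimized model written to", optimized_path)
#
# Data types 1, 6, 7 and 11 above are onnx.TensorProto FLOAT, INT32, INT64 and
# DOUBLE, hence the element sizes of 4, 4, 8 and 8 bytes.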
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
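# The brute force above is tractable: a three-letter lowercase key gives only
# 26**3 = 17576 candidate keys, and try_key abandons a key as soon as a single
# XOR-decoded byte falls outside the printable-character set VALID_INTS.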
if __name__ == "__main__":
print(F"""{solution() = }""")
| 362
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
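# Minimal usage sketch (the file name is hypothetical and the vocab size illustrative):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train("corpus.txt", vocab_size=8000)
#     print(tokenizer.encode("This is a test.").tokens)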
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
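# Note: at runtime the module object is replaced by a _LazyModule, so the submodules
# listed in _import_structure are only imported on first attribute access; the
# TYPE_CHECKING branch above exists purely so static type checkers and IDEs can see
# the real symbols.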
| 363
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
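# Worked example: repo_id="org-name/dataset-name", path="filename with blanks.csv",
# revision=None resolves to
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# (quote() percent-encodes the blank in the file name).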
| 303
| 0
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    """simple docstring"""
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """simple docstring"""
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
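# The pattern in all three tests above: two calls under temp_seed(42) must agree
# (the context manager seeds the RNG and restores its previous state on exit),
# while a third call outside the context must differ, proving the global RNG was
# not left permanently seeded.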
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    """simple docstring"""
    output_data = NestedDataStructure(input_data).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    """simple docstring"""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    """simple docstring"""
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    """simple docstring"""
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    """simple docstring"""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 364
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
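# Minimal usage sketch (the keyword values shown are the defaults, for illustration):
#
#     config = ASTConfig(patch_size=16, frequency_stride=10, time_stride=10)
#     print(config.num_mel_bins)  # 128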
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_) , 1_44_10)
a_ = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_) , 1_44_10)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_) , 1_44_10)
a_ = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_) , 1_44_10)
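# A minimal cross-framework round-trip sketch, assuming a checkpoint published
# with both PyTorch and TensorFlow weights ("bert-base-uncased" is only an
# example). It mirrors the `from_pt` / `from_tf` flags exercised in the tests
# above; illustrative only, not part of the test suite.
if __name__ == "__main__":
    from transformers import (
        AutoModelForSequenceClassification,
        TFAutoModelForSequenceClassification,
    )
    tf_model = TFAutoModelForSequenceClassification.from_pretrained(
        "bert-base-uncased", from_pt=True
    )
    pt_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    assert tf_model.config.hidden_size == pt_model.config.hidden_size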
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
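# For reference, the `inputs` property above resolves to the following mapping
# for a default (non multiple-choice) task; both tensors share the same
# dynamic axes:
#
#     {"input_ids": {0: "batch", 1: "sequence"},
#      "attention_mask": {0: "batch", 1: "sequence"}}
#
# For the "multiple-choice" task an extra axis is inserted, giving
# {0: "batch", 1: "choice", 2: "sequence"} for both inputs.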
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
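# The `_LazyModule` assignment above defers the heavy framework imports until
# an attribute is first accessed. A simplified stand-in for the same idea,
# using a PEP 562 module-level `__getattr__` (a sketch, not the actual
# `_LazyModule` implementation):
#
#     import importlib
#     _lazy_map = {"FunnelModel": ".modeling_funnel"}
#     def __getattr__(name):
#         if name in _lazy_map:
#             return getattr(importlib.import_module(_lazy_map[name], __name__), name)
#         raise AttributeError(name)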
| 366
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
        self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 0
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
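# A minimal usage sketch for one of the collators re-exported above (the
# checkpoint name is only an example): DataCollatorWithPadding pads a list of
# tokenized examples into a single rectangular batch.
if __name__ == "__main__":
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorWithPadding(tokenizer=tokenizer)
    features = [tokenizer("short"), tokenizer("a somewhat longer sentence")]
    batch = collator(features)  # input_ids / attention_mask padded to equal length
    print(batch["input_ids"].shape)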
| 367
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
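# Sketch of the mixed-precision pattern used in the skipped test above: switch
# the global Keras policy for the run, then restore it so later tests are
# unaffected (a try/finally guard makes the restore unconditional):
#
#     policy = tf.keras.mixed_precision.Policy("mixed_float16")
#     tf.keras.mixed_precision.set_global_policy(policy)
#     try:
#         ...  # build / fit the model under the float16 compute policy
#     finally:
#         tf.keras.mixed_precision.set_global_policy("float32")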
| 303
| 0
|
def mf_knapsack(i, wt, val, j):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(  # best of skipping vs. taking item i
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]
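# Note: mf_knapsack reads the global memo table `f`, which must be initialised
# before the first call (as done under `__main__` below):
#
#     f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
#
# A value of -1 marks a subproblem that has not been solved yet; row and
# column 0 hold the zero-item / zero-capacity base cases.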
def knapsack(w, wt, val, n):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp  # index with `w` directly; `w_` is unbound when w == 0
def knapsack_with_example_solution(w, wt, val):
    """simple docstring"""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """simple docstring"""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
| 368
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
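# A minimal usage sketch (the checkpoint name is only an example): the
# processor routes `audio` to the feature extractor and `text` to the
# tokenizer, attaching the tokenized ids as `labels` as shown in __call__.
#
#     import numpy as np
#     from transformers import Speech2TextProcessor
#     processor = Speech2TextProcessor.from_pretrained(
#         "facebook/s2t-small-librispeech-asr"
#     )
#     speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#     inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
#     # inputs now holds `input_features`, `attention_mask` and `labels`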
| 303
| 0
|
"""simple docstring"""
def factorial(digit: int) -> int:
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
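# Quick sanity checks: the only base-10 Krishnamurthy numbers (factorions)
# are 1, 2, 145 and 40585.
assert krishnamurthy(145)  # 1! + 4! + 5! = 1 + 24 + 120 = 145
assert krishnamurthy(40585)  # 4! + 0! + 5! + 8! + 5! = 40585
assert not krishnamurthy(146)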
if __name__ == "__main__":
    print('Program to check whether a number is a Krishnamurthy Number or not.')
    number = int(input('Enter number: ').strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
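    # calculate_heuristic is the Manhattan distance to the goal; greedy
    # best-first search sorts the open list on this value alone (f = h),
    # unlike A*, which would sort on g + h.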
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                ))
        return successors
    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( _a , unittest.TestCase ):
a_ : List[Any] = RoCBertTokenizer
a_ : Optional[Any] = None
a_ : str = False
a_ : Dict = True
a_ : str = filter_non_english
def UpperCAmelCase__ ( self) ->Union[str, Any]:
super().setUp()
a_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
a_ = {}
a_ = {}
for i, value in enumerate(__lowerCAmelCase):
a_ = i
a_ = i
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"])
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
with open(self.word_shape_file , "w" , encoding="utf-8") as word_shape_writer:
json.dump(__lowerCAmelCase , __lowerCAmelCase , ensure_ascii=__lowerCAmelCase)
with open(self.word_pronunciation_file , "w" , encoding="utf-8") as word_pronunciation_writer:
json.dump(__lowerCAmelCase , __lowerCAmelCase , ensure_ascii=__lowerCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
a_ = tokenizer.tokenize("你好[SEP]你是谁")
self.assertListEqual(__lowerCAmelCase , ["你", "好", "[SEP]", "你", "是", "谁"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__lowerCAmelCase) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__lowerCAmelCase) , [5, 6, 2, 5, 7, 8])
def UpperCAmelCase__ ( self) ->Dict:
a_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def UpperCAmelCase__ ( self) ->Tuple:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def UpperCAmelCase__ ( self) ->Tuple:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def UpperCAmelCase__ ( self) ->int:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def UpperCAmelCase__ ( self) ->int:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def UpperCAmelCase__ ( self) ->List[str]:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def UpperCAmelCase__ ( self) ->Any:
a_ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def UpperCAmelCase__ ( self) ->List[str]:
a_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
a_ = {}
for i, token in enumerate(__lowerCAmelCase):
a_ = i
a_ = RoCBertWordpieceTokenizer(vocab=__lowerCAmelCase , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def UpperCAmelCase__ ( self) ->List[Any]:
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def UpperCAmelCase__ ( self) ->Union[str, Any]:
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def UpperCAmelCase__ ( self) ->Any:
a_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
a_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
def UpperCAmelCase__ ( self) ->List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
a_ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
a_ = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
a_ = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
a_ = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , "do_lower_case") else False
a_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["的", "人", "有"]
a_ = "".join(__lowerCAmelCase)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
a_ = True
a_ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
a_ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
a_ = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase)
a_ = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
a_ = False
a_ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
a_ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
a_ = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase)
a_ = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase)
# it is expected that only the first Chinese character is not preceded by "##".
a_ = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase)
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
a_ = tokenizer.encode("你好" , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer.encode("你是谁" , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase)
a_ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.get_tokenizers(do_lower_case=__lowerCAmelCase)
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}'''):
a_ = "你好,你是谁"
a_ = tokenizer.tokenize(__lowerCAmelCase)
a_ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
a_ = tokenizer.convert_tokens_to_shape_ids(__lowerCAmelCase)
a_ = tokenizer.convert_tokens_to_pronunciation_ids(__lowerCAmelCase)
a_ = tokenizer.prepare_for_model(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
a_ = tokenizer.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
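# A minimal construction sketch mirroring setUp above: RoCBertTokenizer takes
# a vocab file plus word-shape and word-pronunciation JSON maps, and exposes
# parallel id spaces for all three.
#
#     tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#     tokens = tokenizer.tokenize("你好")  # -> ["你", "好"]
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
#     pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)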
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case ( snake_case_ ):
def UpperCAmelCase__ ( self) ->int:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(_A , "tf_padding"))
self.parent.assertTrue(hasattr(_A , "depth_multiplier"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=0.25 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=10_24 , __UpperCAmelCase=32 , __UpperCAmelCase="relu6" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = depth_multiplier
a_ = min_depth
a_ = tf_padding
a_ = int(last_hidden_size * depth_multiplier)
a_ = output_stride
a_ = hidden_act
a_ = classifier_dropout_prob
a_ = use_labels
a_ = is_training
a_ = num_labels
a_ = initializer_range
a_ = scope
def UpperCAmelCase__ ( self) ->str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self) ->str:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]:
a_ = MobileNetVaModel(config=_A)
model.to(_A)
model.eval()
a_ = model(_A)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = self.num_labels
a_ = MobileNetVaForImageClassification(_A)
model.to(_A)
model.eval()
a_ = model(_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( snake_case_ , snake_case_ , unittest.TestCase ):
a_ : Tuple = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a_ : Any = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a_ : Tuple = False
a_ : str = False
a_ : int = False
a_ : Union[str, Any] = False
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = MobileNetVaModelTester(self)
a_ = MobileNetVaConfigTester(self , config_class=_A , has_text_modality=_A)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->str:
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Union[str, Any]:
pass
@unittest.skip(reason="MobileNetV1 does not output attentions")
def UpperCAmelCase__ ( self) ->Tuple:
pass
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(_A)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
def UpperCAmelCase__ ( self) ->Optional[Any]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(_A , _A))
a_ = outputs.hidden_states
a_ = 26
self.assertEqual(len(_A) , _A)
a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(_A , _A , _A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(_A , _A , _A)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = MobileNetVaModel.from_pretrained(_A)
self.assertIsNotNone(_A)
def UpperCamelCase ( ) ->str:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->List[Any]:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
)
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(_A)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=_A , return_tensors="pt").to(_A)
# forward pass
with torch.no_grad():
a_ = model(**_A)
# verify the logits
a_ = torch.Size((1, 10_01))
self.assertEqual(outputs.logits.shape , _A)
a_ = torch.tensor([-4.1_739, -1.1_233, 3.1_205]).to(_A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4))
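# A short post-processing sketch for the integration test above (upstream the
# class is named MobileNetV1ForImageClassification; the checkpoint is the one
# used in the test): map the winning logit to a human-readable label.
#
#     predicted_idx = outputs.logits.argmax(-1).item()
#     print(model.config.id2label[predicted_idx])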
| 350
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F'''{src_lang}-{tgt_lang}'''
a_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , "README.md" )
print(F'''Generating {path}''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class snake_case ( PretrainedConfig ):
a_ : Tuple = """codegen"""
a_ : List[Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16,
             rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0,
             attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
             bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs) -> None:
    self.vocab_size = vocab_size
    self.n_ctx = n_ctx
    self.n_positions = n_positions
    self.n_embd = n_embd
    self.n_layer = n_layer
    self.n_head = n_head
    self.n_inner = n_inner
    self.rotary_dim = rotary_dim
    self.activation_function = activation_function
    self.resid_pdrop = resid_pdrop
    self.embd_pdrop = embd_pdrop
    self.attn_pdrop = attn_pdrop
    self.layer_norm_epsilon = layer_norm_epsilon
    self.initializer_range = initializer_range
    self.use_cache = use_cache
    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    super().__init__(
        bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class snake_case ( OnnxConfigWithPast ):
def __init__(self, config, task="default", patching_specs=None, use_past=False) -> None:
    super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
    if not getattr(self._config, "pad_token_id", None):
        # TODO: how to do that better?
        self._config.pad_token_id = 0
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
    common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
    if self.use_past:
        self.fill_with_past_key_values_(common_inputs, direction="inputs")
        common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
    else:
        common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
    return common_inputs
@property
def num_layers(self) -> int:
    return self._config.n_layer
@property
def num_attention_heads(self) -> int:
    return self._config.n_head
def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
    common_inputs = super().generate_dummy_inputs(
        tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
    # We need to order the inputs in the way they appear in the forward()
    ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
    # Need to add the past_keys
    if self.use_past:
        if not is_torch_available():
            raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
        else:
            import torch

            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            past_shape = (
                batch,
                self.num_attention_heads,
                past_key_values_length,
                self._config.hidden_size // self.num_attention_heads,
            )
            ordered_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
            ]
    ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
    if self.use_past:
        mask_dtype = ordered_inputs["attention_mask"].dtype
        ordered_inputs["attention_mask"] = torch.cat(
            [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
    return ordered_inputs
@property
def default_onnx_opset(self) -> int:
    return 13
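# A short, hedged torch illustration (an addition, independent of the config
# class above) of the dummy past_key_values layout it constructs: one
# (key, value) pair per layer, each shaped (batch, n_head, past_length, head_dim),
# with the attention mask extended to also cover the past positions.
import torch

batch, n_head, seqlen, head_dim, n_layer = 2, 16, 8, 64, 4  # illustrative sizes
past_length = seqlen + 2  # the code above deliberately uses a longer past
past_key_values = [
    (torch.zeros(batch, n_head, past_length, head_dim), torch.zeros(batch, n_head, past_length, head_dim))
    for _ in range(n_layer)
]
attention_mask = torch.cat([torch.ones(batch, seqlen), torch.ones(batch, past_length)], dim=1)
assert attention_mask.shape == (batch, seqlen + past_length)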
| 351
|
"""simple docstring"""
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
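# A minimal sanity check (an addition, assuming the fixed signature above):
# 2**10 = 1024 and 1024 % 1000 == 24; by Fermat's little theorem,
# b * b**(p - 2) is congruent to 1 modulo the prime p when gcd(b, p) == 1.
assert binary_exponentiation(2, 10, 1000) == 24
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1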
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
        'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RemBertForCausalLM',
        'RemBertForMaskedLM',
        'RemBertForMultipleChoice',
        'RemBertForQuestionAnswering',
        'RemBertForSequenceClassification',
        'RemBertForTokenClassification',
        'RemBertLayer',
        'RemBertModel',
        'RemBertPreTrainedModel',
        'load_tf_weights_in_rembert',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
        'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRemBertForCausalLM',
        'TFRemBertForMaskedLM',
        'TFRemBertForMultipleChoice',
        'TFRemBertForQuestionAnswering',
        'TFRemBertForSequenceClassification',
        'TFRemBertForTokenClassification',
        'TFRemBertLayer',
        'TFRemBertModel',
        'TFRemBertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 352
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( YolosImageProcessor ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
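# A hedged migration sketch (an assumption, not part of this module): new code
# should construct the image processor directly instead of this deprecated
# wrapper, e.g.
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
# where "hustvl/yolos-small" is only an illustrative checkpoint id.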
| 303
| 0
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class snake_case :
a_ : List[Any] = PegasusConfig
a_ : Any = {}
a_ : Optional[Any] = "gelu"
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
a_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
a_ = np.concatenate([input_ids, eos_tensor] , axis=1)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a_ = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
return config, inputs_dict
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = 20
a_ = model_class_name(__UpperCAmelCase)
a_ = model.encode(inputs_dict["input_ids"])
a_ , a_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a_ = model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase)
a_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
a_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a_ = model.decode(
decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
a_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
a_ = model.decode(
decoder_input_ids[:, -1:] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCAmelCase , )
a_ = model.decode(__UpperCAmelCase , __UpperCAmelCase)
a_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = 20
a_ = model_class_name(__UpperCAmelCase)
a_ = model.encode(inputs_dict["input_ids"])
a_ , a_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
a_ = model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase)
a_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a_ = model.decode(
decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
a_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
a_ = model.decode(
decoder_input_ids[:, -1:] , __UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
a_ = model.decode(__UpperCAmelCase , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase)
a_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , ) ->int:
"""simple docstring"""
if attention_mask is None:
a_ = np.not_equal(UpperCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
a_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
a_ : int = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a_ : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a_ : List[str] = True
a_ : Union[str, Any] = False
a_ : str = False
a_ : List[str] = False
def UpperCAmelCase__ ( self) ->int:
a_ = FlaxPegasusModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)
a_ = model_class(__UpperCAmelCase)
@jax.jit
def encode_jitted(__UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase):
return model.encode(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase)
with self.subTest("JIT Enabled"):
a_ = encode_jitted(**__UpperCAmelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
a_ = encode_jitted(**__UpperCAmelCase).to_tuple()
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase))
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCAmelCase__ ( self) ->Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a_ = model_class(__UpperCAmelCase)
a_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
a_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
return model.decode(
decoder_input_ids=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , encoder_outputs=__UpperCAmelCase , )
with self.subTest("JIT Enabled"):
a_ = decode_jitted(**__UpperCAmelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
a_ = decode_jitted(**__UpperCAmelCase).to_tuple()
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase))
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
input_ids = np.ones((1, 1))
outputs = model(input_ids)
self.assertIsNotNone(outputs)
@slow
def UpperCAmelCase__ ( self) ->Tuple:
model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
tgt_text = [
"California\'s largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.",
]
inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
translated_tokens = model.generate(**inputs, num_beams=2).sequences
decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 353
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
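    # A hedged single-process illustration (pure torch, no distributed launch) of
    # the padding semantics tested above: pad_across_processes grows dim 0 to the
    # longest size across processes, filling with 0 after (default) or before
    # (pad_first=True) the original data. `_pad_like_sketch` is a hypothetical helper.
    def _pad_like_sketch(t, longest, pad_first=False):
        padded = torch.zeros(longest, *t.shape[1:], dtype=t.dtype)
        if pad_first:
            padded[longest - t.shape[0]:] = t
        else:
            padded[: t.shape[0]] = t
        return padded

    _t = torch.ones(3, 10, dtype=torch.int64)
    assert torch.equal(_pad_like_sketch(_t, 5)[:3], _t)
    assert _pad_like_sketch(_t, 5, pad_first=True)[:2].eq(0).all()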
| 303
| 0
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case ( SeqaSeqTrainer ):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def UpperCAmelCase__ ( self , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = "eval" , **__UpperCAmelCase , ) ->Dict[str, float]:
a_ = gen_kwargs.copy()
a_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
a_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
a_ = gen_kwargs
a_ = self.eval_dataset if eval_dataset is None else eval_dataset
a_ = self.get_eval_dataloader(a__ )
a_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ = self.compute_metrics
a_ = None
a_ = time.time()
a_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
a_ = eval_loop(
a__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
finally:
a_ = compute_metrics
a_ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a_ = self.post_process_function(a__ , a__ , a__ )
a_ = self.compute_metrics(a__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ = metrics.pop(a__ )
metrics.update(output.metrics )
else:
a_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(a__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , a__ )
return metrics
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase = "test" , **__UpperCAmelCase ) ->List[str]:
a_ = gen_kwargs.copy()
a_ = self.get_test_dataloader(a__ )
# Temporarily disable metric computation, we will do it in the loop here.
a_ = self.compute_metrics
a_ = None
a_ = time.time()
a_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
a_ = eval_loop(
a__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
finally:
a_ = compute_metrics
a_ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ = self.post_process_function(a__ , a__ , a__ , "predict" )
a_ = self.compute_metrics(a__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ = metrics.pop(a__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a__ )
| 354
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = passable cell) via Dijkstra."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
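    # A minimal, hedged usage check (an addition) of the grid Dijkstra above
    # (1 marks a passable cell):
    test_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    distance, path = dijkstra(test_grid, (0, 0), (2, 2), allow_diagonal=False)
    assert distance == 4 and path[0] == (0, 0) and path[-1] == (2, 2)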
| 303
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Dict:
model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
    [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
    output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
def UpperCAmelCase__ ( self) ->Tuple:
model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
    [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
    output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 355
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__(self) -> None:
    self.watermark = WATERMARK_BITS
    self.encoder = WatermarkEncoder()
    self.encoder.set_watermark("bits", self.watermark)
def apply_watermark(self, images):
    # can't encode images that are smaller than 256
    if images.shape[-1] < 256:
        return images
    images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
    images = [self.encoder.encode(image, "dwtDct") for image in images]
    images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
    images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
    return images
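# A small, hedged check (an addition) of the bit conversion above: the 0/1 list
# round-trips back to the original integer message.
assert all(bit in (0, 1) for bit in WATERMARK_BITS)
assert int("".join(str(bit) for bit in WATERMARK_BITS), 2) == WATERMARK_MESSAGE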
| 303
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=12 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0 , __UpperCAmelCase=None , ) ->Optional[Any]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = projection_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = dropout
a_ = attention_dropout
a_ = max_position_embeddings
a_ = initializer_range
a_ = scope
a_ = bos_token_id
def prepare_config_and_inputs(self):
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
    if input_mask is not None:
        input_mask = input_mask.numpy()
        batch_size, seq_length = input_mask.shape
        rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
        for batch_idx, start_index in enumerate(rnd_start_indices):
            input_mask[batch_idx, :start_index] = 1
            input_mask[batch_idx, start_index:] = 0
    config = self.get_config()
    return config, input_ids, tf.convert_to_tensor(input_mask)
def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def create_and_check_model(self, config, input_ids, input_mask):
    model = TFBlipTextModel(config=config)
    result = model(input_ids, attention_mask=input_mask, training=False)
    result = model(input_ids, training=False)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
    config, input_ids, input_mask = self.prepare_config_and_inputs()
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class snake_case ( TFModelTesterMixin , unittest.TestCase ):
a_ : Any = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[Any] = False
a_ : str = False
a_ : Dict = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BlipTextModelTester(self)
a_ = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def UpperCAmelCase__ ( self) ->int:
pass
def UpperCAmelCase__ ( self) ->int:
pass
@unittest.skip(reason="Blip does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[Any]:
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
def UpperCAmelCase__ ( self) ->Any:
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
def UpperCAmelCase__ ( self) ->Tuple:
pass
@slow
def UpperCAmelCase__ ( self) ->Dict:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFBlipTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def UpperCAmelCase__ (self, allow_missing_keys=True) -> Any:
    super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 356
|
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
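    # A hedged sanity bound (an addition) on the expectation above: the result
    # must stay strictly below the number of colours (7), and drawing 20 of the
    # 70 balls leaves few colours missing.
    assert 6.5 < float(solution(20)) < 7.0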
| 303
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
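# A small, hedged torch check (an addition) of the fused-qkv split used in
# read_in_q_k_v above: a (3 * hidden, hidden) projection slices into equal
# query / key / value thirds that concatenate back to the original.
_hidden = 8
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden], _fused[_hidden : 2 * _hidden], _fused[-_hidden:]
assert torch.equal(torch.cat([_q, _k, _v]), _fused)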
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
UpperCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 357
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
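# A minimal, hedged check (an addition) of the rewrite rules above: a
# fairseq-style key maps onto the corresponding bart-style state_dict key.
assert rename_state_dict_key("memory_attention/kernel") == "encoder_attn.weight"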
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """simple docstring"""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
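# A hedged invocation sketch (the script filename and checkpoint path below are
# placeholders, not taken from this file):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc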
| 303
| 0
|
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    """simple docstring"""
    return max(smallest, min(n, largest))
def clamp_rect(rect, min_coords, max_coords):
    """simple docstring"""
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    """simple docstring"""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """simple docstring"""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    """simple docstring"""
    divisor = n % d
    return n - divisor
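# A few tiny, hedged checks (an addition) of the geometry helpers above:
assert clamp(12, 0, 10) == 10
assert clamp_rect((-3, -3, 12, 12), [0, 0], [10, 10]) == (0, 0, 10, 10)
assert next_divisible(13, 4) == 12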
class snake_case ( StableDiffusionUpscalePipeline ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 3_50 , ) ->Optional[Any]:
super().__init__(
vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , low_res_scheduler=__snake_case , scheduler=__snake_case , max_noise_level=__snake_case , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->Dict:
torch.manual_seed(0)
a_ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size),
min(image.size[0] , (x + 1) * tile_size),
min(image.size[1] , (y + 1) * tile_size),
)
a_ = add_overlap_rect(__snake_case , __snake_case , image.size)
a_ = image.crop(__snake_case)
a_ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
a_ = translated_slice_x - (original_image_slice / 2)
a_ = max(0 , __snake_case)
a_ = squeeze_tile(__snake_case , __snake_case , __snake_case , __snake_case)
a_ = to_input.size
a_ = to_input.resize((tile_size, tile_size) , Image.BICUBIC)
a_ = super(__snake_case , self).__call__(image=__snake_case , **__snake_case).images[0]
a_ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC)
a_ = unsqueeze_tile(__snake_case , __snake_case)
a_ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC)
a_ = []
if x == 0:
remove_borders.append("l")
elif crop_rect[2] == image.size[0]:
remove_borders.append("r")
if y == 0:
remove_borders.append("t")
elif crop_rect[3] == image.size[1]:
remove_borders.append("b")
a_ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__snake_case) , mode="L" , )
final_image.paste(
__snake_case , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __snake_case)
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 75 , __UpperCAmelCase = 9.0 , __UpperCAmelCase = 50 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1_28 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , ) ->Union[str, Any]:
a_ = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4))
a_ = math.ceil(image.size[0] / tile_size)
a_ = math.ceil(image.size[1] / tile_size)
a_ = tcx * tcy
a_ = 0
for y in range(__snake_case):
for x in range(__snake_case):
self._process_tile(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , prompt=__snake_case , num_inference_steps=__snake_case , guidance_scale=__snake_case , noise_level=__snake_case , negative_prompt=__snake_case , num_images_per_prompt=__snake_case , eta=__snake_case , generator=__snake_case , latents=__snake_case , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image})
return final_image
def main():
    """simple docstring"""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = snake_case.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 358
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
        a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens to input_ids and the attention mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
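# Illustrative sketch, not part of the original file: the tester above feeds
# its attributes into a small BertGenerationConfig; built directly, it would
# look roughly like this (values mirror the tester defaults).
def _demo_tiny_bert_generation_config():
    from transformers import BertGenerationConfig

    return BertGenerationConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
    )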
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
        a_ , a_ , a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
| 303
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt"}
UpperCamelCase_ = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
UpperCamelCase_ = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
UpperCamelCase_ = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class snake_case ( SCREAMING_SNAKE_CASE__ ):
a_ : Tuple = VOCAB_FILES_NAMES
a_ : Any = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = PRETRAINED_INIT_CONFIGURATION
a_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = ConvBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Optional[Any]:
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , A__) != do_lower_case
or normalizer_state.get("strip_accents" , A__) != strip_accents
or normalizer_state.get("handle_chinese_chars" , A__) != tokenize_chinese_chars
):
a_ = getattr(A__ , normalizer_state.pop("type"))
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**A__)
a_ = do_lower_case
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=None) ->List[Any]:
a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->int:
a_ = self._tokenizer.model.save(A__ , name=A__)
return tuple(A__)
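# Illustrative check, not part of the original file: the token-type method
# above assigns segment id 0 to sentence A (plus [CLS]/[SEP]) and segment id 1
# to sentence B (plus its trailing [SEP]).
def _demo_token_type_ids():
    cls, sep = [101], [102]
    ids_a, ids_b = [7, 8], [9]
    segment_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
    assert segment_ids == [0, 0, 0, 0, 1, 1]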
| 359
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
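# Illustrative check, not part of the original file: the rename helper above
# simply pops the old key and re-inserts its value under the new name.
def _demo_rename_key():
    state_dict = {"backbone.0.body.conv1.weight": "some tensor"}
    val = state_dict.pop("backbone.0.body.conv1.weight")
    state_dict["backbone.conv_encoder.model.embedder.embedder.convolution.weight"] = val
    assert list(state_dict.values()) == ["some tensor"]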
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
a_ = ""
if is_panoptic:
a_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
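# Illustrative sketch, not part of the original file: PyTorch's fused attention
# packs the query, key and value projections row-wise into one in_proj matrix,
# so slicing consecutive 256-row blocks recovers them, exactly as done above.
def _demo_qkv_split():
    import torch

    in_proj_weight = torch.randn(3 * 256, 256)  # rows: q, then k, then v
    q = in_proj_weight[:256, :]
    k = in_proj_weight[256:512, :]
    v = in_proj_weight[-256:, :]
    assert q.shape == k.shape == v.shape == (256, 256)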
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
a_ , a_ = get_detr_config(UpperCAmelCase )
# load original model from torch hub
a_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F'''Converting model {model_name}...''' )
a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase ):
if is_panoptic:
a_ = "detr." + src
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# verify our conversion on an image
a_ = "coco_panoptic" if is_panoptic else "coco_detection"
a_ = DetrImageProcessor(format=UpperCAmelCase )
a_ = processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = detr(UpperCAmelCase )
a_ = model(UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCamelCase ( UpperCAmelCase = "isbn/0140328726" ) ->dict:
"""simple docstring"""
a_ = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
a_ = F'''{olid} is not a valid Open Library olid'''
raise ValueError(UpperCAmelCase )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
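# Illustrative check, not part of the original file: the validation above
# accepts exactly one slash after stripping, so "isbn/0140328726" passes while
# "a/b/c" would raise a ValueError.
def _demo_olid_validation():
    for olid, is_valid in [("isbn/0140328726", True), ("a/b/c", False)]:
        assert (olid.strip().strip("/").count("/") == 1) is is_valid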
def UpperCamelCase ( UpperCAmelCase ) ->dict:
"""simple docstring"""
a_ = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
a_ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
a_ = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
a_ = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
a_ = ", ".join(UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCamelCase_ = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
UpperCamelCase_ = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('\n'.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 360
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    a_ = "large" in model_name or "huge" in model_name
    a_ = "large" in model_name or "huge" in model_name
    a_ = "large" in model_name or "huge" in model_name
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
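# Illustrative check, not part of the original file: the depths above are
# picked from the model name, so "tiny" variants get [2, 2, 6, 2] and the
# larger ones [2, 2, 18, 2].
def _demo_depths_from_name():
    for name, depths in [("focalnet-tiny", [2, 2, 6, 2]), ("focalnet-base", [2, 2, 18, 2])]:
        assert ([2, 2, 6, 2] if "tiny" in name else [2, 2, 18, 2]) == depths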
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
    # fmt: off
    a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class snake_case ( lowerCamelCase__ ):
a_ : Optional[int] = 42
class snake_case ( lowerCamelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->List[Any]:
super().__init__()
self.register_modules(
prior=__A , image_encoder=__A , image_processor=__A , scheduler=__A , renderer=__A , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
if latents is None:
a_ = randn_tensor(__A , generator=__A , device=__A , dtype=__A)
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
a_ = latents.to(__A)
a_ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase__ ( self , __UpperCAmelCase=0) ->Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
a_ = torch.device(F'''cuda:{gpu_id}''')
a_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A , __A)
@property
def UpperCAmelCase__ ( self) ->Union[str, Any]:
if self.device != torch.device("meta") or not hasattr(self.image_encoder , "_hf_hook"):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Optional[Any]:
if isinstance(__A , __A) and isinstance(image[0] , torch.Tensor):
a_ = torch.cat(__A , axis=0) if image[0].ndim == 4 else torch.stack(__A , axis=0)
if not isinstance(__A , torch.Tensor):
a_ = self.image_processor(__A , return_tensors="pt").pixel_values[0].unsqueeze(0)
a_ = image.to(dtype=self.image_encoder.dtype , device=__A)
        a_ = self.image_encoder(__A)["last_hidden_state"]
a_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
a_ = image_embeds.repeat_interleave(__A , dim=0)
if do_classifier_free_guidance:
a_ = torch.zeros_like(__A)
            # For classifier-free guidance we would need two forward passes.
            # Here we concatenate the unconditional and image embeddings into
            # a single batch to avoid doing two forward passes.
a_ = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A)
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = 25 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 4.0 , __UpperCAmelCase = 64 , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ) ->Any:
if isinstance(__A , PIL.Image.Image):
a_ = 1
elif isinstance(__A , torch.Tensor):
a_ = image.shape[0]
elif isinstance(__A , __A) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
a_ = len(__A)
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A)}''')
a_ = self._execution_device
a_ = batch_size * num_images_per_prompt
a_ = guidance_scale > 1.0
a_ = self._encode_image(__A , __A , __A , __A)
# prior
self.scheduler.set_timesteps(__A , device=__A)
a_ = self.scheduler.timesteps
a_ = self.prior.config.num_embeddings
a_ = self.prior.config.embedding_dim
a_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __A , __A , __A , self.scheduler , )
        # YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
a_ = latents.reshape(latents.shape[0] , __A , __A)
for i, t in enumerate(self.progress_bar(__A)):
# expand the latents if we are doing classifier free guidance
a_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a_ = self.scheduler.scale_model_input(__A , __A)
a_ = self.prior(
__A , timestep=__A , proj_embedding=__A , ).predicted_image_embedding
# remove the variance
a_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
a_ = noise_pred.chunk(2)
a_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
a_ = self.scheduler.step(
__A , timestep=__A , sample=__A , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A)
a_ = []
for i, latent in enumerate(__A):
a_ = self.renderer.decode(
latent[None, :] , __A , size=__A , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(__A)
a_ = torch.stack(__A)
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''')
a_ = images.cpu().numpy()
if output_type == "pil":
a_ = [self.numpy_to_pil(__A) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A)
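# Illustrative sketch, not part of the original file: the guidance step inside
# the denoising loop above combines the unconditional and conditional
# predictions as ``uncond + scale * (cond - uncond)``; as a standalone function:
def _demo_classifier_free_guidance(noise_pred_uncond, noise_pred_cond, guidance_scale=4.0):
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)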
| 361
|
"""simple docstring"""
import os
import numpy
import onnx
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = a.name
a_ = b.name
a_ = ""
a_ = ""
a_ = a == b
a_ = name_a
a_ = name_b
return res
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = list(model.graph.initializer )
a_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
a_ = inits[i].name
a_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = os.path.dirname(UpperCAmelCase )
a_ = os.path.basename(UpperCAmelCase )
a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
a_ = list(model.graph.initializer )
a_ = set()
a_ = {}
a_ = []
a_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
a_ = inits[j].data_type
a_ = numpy.prod(inits[j].dims )
                if dtype in (1, 6):
                    mem_size *= 4
                elif dtype in (7, 11):
                    mem_size *= 8
else:
print("unexpected data type: " , UpperCAmelCase )
total_reduced_size += mem_size
a_ = inits[i].name
a_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
a_ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
a_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = "optimized_" + model_file_name
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
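# Illustrative sketch, not part of the original file: the byte sizes used in
# the loop above follow the ONNX TensorProto data-type codes
# (1 = float32, 6 = int32, 7 = int64, 11 = double).
def _demo_tensor_mem_size(dtype, numel):
    per_element = {1: 4, 6: 4, 7: 8, 11: 8}.get(dtype)
    return None if per_element is None else numel * per_element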
| 303
| 0
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
def count_of_possible_combinations(UpperCAmelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__snake_case )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
UpperCAmelCase , UpperCAmelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
a_ = sum(
count_of_possible_combinations_with_dp_array(target - item , __snake_case )
for item in array )
a_ = answer
return answer
a_ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__snake_case , __snake_case )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = [0] * (target + 1)
a_ = 1
for i in range(1 , target + 1 ):
for j in range(__snake_case ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
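# Illustrative check, not part of the original file: all three variants count
# ordered combinations; with array [1, 2, 5] and target 5 the bottom-up
# recurrence above yields 9.
def _demo_combination_sum():
    array, target = [1, 2, 5], 5
    dp = [0] * (target + 1)
    dp[0] = 1
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    assert dp[target] == 9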
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = 3
UpperCamelCase_ = 5
UpperCamelCase_ = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 362
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCamelCase_ = TypeVar('T')
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
return (position - 1) // 2
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
return (2 * position) + 1
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
return (2 * position) + 2
class snake_case ( Generic[T] ):
def __init__( self) ->None:
a_ = []
a_ = {}
a_ = 0
def __len__( self) ->int:
return self.elements
def __repr__( self) ->str:
return str(self.heap)
def UpperCAmelCase__ ( self) ->bool:
return self.elements == 0
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->None:
self.heap.append((elem, weight))
a_ = self.elements
self.elements += 1
self._bubble_up(UpperCamelCase__)
def UpperCAmelCase__ ( self) ->T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1)
a_ , a_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
a_ , a_ = self.heap[0]
self._bubble_down(UpperCamelCase__)
return elem
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->None:
a_ = self.position_map[elem]
a_ = (elem, weight)
if position > 0:
a_ = get_parent_position(UpperCamelCase__)
a_ , a_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCamelCase__)
else:
self._bubble_down(UpperCamelCase__)
else:
self._bubble_down(UpperCamelCase__)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->None:
a_ = self.position_map[elem]
if curr_pos == 0:
return None
a_ = get_parent_position(UpperCamelCase__)
a_ , a_ = self.heap[curr_pos]
a_ , a_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCamelCase__ , UpperCamelCase__)
return self._bubble_up(UpperCamelCase__)
return None
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->None:
a_ = self.position_map[elem]
a_ , a_ = self.heap[curr_pos]
a_ = get_child_left_position(UpperCamelCase__)
a_ = get_child_right_position(UpperCamelCase__)
if child_left_position < self.elements and child_right_position < self.elements:
a_ , a_ = self.heap[child_left_position]
a_ , a_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCamelCase__ , UpperCamelCase__)
return self._bubble_down(UpperCamelCase__)
if child_left_position < self.elements:
a_ , a_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCamelCase__ , UpperCamelCase__)
return self._bubble_down(UpperCamelCase__)
else:
return None
if child_right_position < self.elements:
a_ , a_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCamelCase__ , UpperCamelCase__)
return self._bubble_down(UpperCamelCase__)
return None
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->None:
a_ = self.heap[nodea_pos][0]
a_ = self.heap[nodea_pos][0]
a_ , a_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
a_ = nodea_pos
a_ = nodea_pos
class snake_case ( Generic[T] ):
def __init__( self) ->None:
a_ = {}
a_ = 0
def __repr__( self) ->str:
return str(self.connections)
def __len__( self) ->int:
return self.nodes
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->None:
        # Add a node to the graph if it is not already present
if node not in self.connections:
a_ = {}
self.nodes += 1
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->None:
self.add_node(UpperCamelCase__)
self.add_node(UpperCamelCase__)
a_ = weight
a_ = weight
def UpperCamelCase ( UpperCAmelCase , ) ->tuple[dict[T, int], dict[T, T | None]]:
"""simple docstring"""
a_ = {node: maxsize for node in graph.connections}
a_ = {node: None for node in graph.connections}
a_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(UpperCAmelCase__ , UpperCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
a_ = priority_queue.extract_min()
a_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
a_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCAmelCase__ , dist[neighbour] )
a_ = node
# running prim's algorithm
while not priority_queue.is_empty():
a_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
a_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCAmelCase__ , dist[neighbour] )
a_ = node
return dist, parent
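# Hypothetical usage sketch, not part of the original file: the graph class and
# Prim's function above are obfuscated, so they are passed in here by reference.
def _demo_prims(graph_cls, prims_fn):
    graph = graph_cls()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 15)
    dist, parent = prims_fn(graph)
    return dist, parent  # dist would come out as {"a": 0, "b": 3, "c": 13}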
| 363
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
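# Illustrative check, not part of the original file: the expected URL quotes
# the path and falls back to the "main" revision.
def _demo_expected_url():
    from urllib.parse import quote

    repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
    url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
    assert url.endswith("/main/filename%20with%20blanks.csv")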
| 303
| 0
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase_ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
UpperCamelCase_ = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
UpperCamelCase_ = '|'.join(sys.argv[1:])
UpperCamelCase_ = re.compile(RF"""^({joined_dirs}).*?\.py$""")
UpperCamelCase_ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
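# Illustrative check, not part of the original file: the filter above keeps
# only .py files under the requested top-level directories.
def _demo_modified_files_filter():
    import re

    regex = re.compile(r"^(utils|src|tests|examples).*?\.py$")
    files = ["src/foo.py", "docs/bar.py", "src/baz.txt"]
    assert [f for f in files if regex.match(f)] == ["src/foo.py"]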
| 364
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=10_24 , num_mel_bins=1_28 , **kwargs , ) ->str:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
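# Hedged sketch: transformers exposes the config above as ASTConfig. With the defaults
# shown (patch_size=16, strides of 10, 128 mel bins, 1024 frames) the usual AST
# patch-count arithmetic yields a 12 x 101 grid; the formula below illustrates that
# arithmetic and is not code taken from this file.
from transformers import ASTConfig

ast_config = ASTConfig()
freq_patches = (ast_config.num_mel_bins - ast_config.patch_size) // ast_config.frequency_stride + 1
time_patches = (ast_config.max_length - ast_config.patch_size) // ast_config.time_stride + 1
print(freq_patches, time_patches)  # 12 101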
| 303
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case ( a__ ):
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( __UpperCAmelCase) ->int:
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ ( self) ->List[Any]:
raise NotImplementedError()
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) ->Union[str, Any]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
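# What the ONNX `inputs` property above evaluates to for the default (non
# multiple-choice) task: both tensors share the same batch/sequence dynamic axes.
# Plain stdlib, no transformers required.
from collections import OrderedDict as _OrderedDict_demo

dynamic_axis_demo = {0: "batch", 1: "sequence"}
print(_OrderedDict_demo([("input_ids", dynamic_axis_demo), ("attention_mask", dynamic_axis_demo)]))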
| 303
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class snake_case ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self) ->Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self) ->Optional[int]:
        options = ort.SessionOptions()
        options.enable_mem_pattern = False  # attribute per the onnxruntime SessionOptions API
        return options
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
a_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = "A red cat sitting on a park bench"
a_ = np.random.RandomState(0)
a_ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCAmelCase , output_type="np" , )
a_ = output.images
a_ = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
a_ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCAmelCase__ ( self) ->List[str]:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
a_ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx")
a_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = "A red cat sitting on a park bench"
a_ = np.random.RandomState(0)
a_ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCAmelCase , output_type="np" , )
a_ = output.images
a_ = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
a_ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 366
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def UpperCAmelCase__ ( self) ->Optional[int]:
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states) , expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 303
| 0
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def UpperCamelCase ( ) ->None:
    """simple docstring"""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected )
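# Sanity arithmetic on the expected MST above: 8 edges for 9 nodes (|V| - 1), with a
# total weight of 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the known answer for this graph.
expected_edges = [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]
print(len(expected_edges), sum(weight for *_, weight in expected_edges))  # 8 37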
| 368
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs) ->Optional[int]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio" , None)
        sampling_rate = kwargs.pop("sampling_rate" , None)
        text = kwargs.pop("text" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if text is not None:
            encodings = self.tokenizer(text , **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
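# Hedged usage sketch for the processor above: one call routes audio through the
# feature extractor and text through the tokenizer, attaching the token ids as
# "labels". Requires network access for the checkpoint; the model id below is the
# usual public Speech2Text checkpoint and is an assumption here.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
inputs = processor(audio=np.zeros(16000, dtype=np.float32), sampling_rate=16000, text="hello world")
print(sorted(inputs.keys()))  # typically input_features (+ attention_mask) and labels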
| 303
| 0
|
"""simple docstring"""
import math
def UpperCamelCase ( initial_intensity , angle ) ->float:
    """simple docstring"""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
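# Worked example: an ideal polarizer at 60 degrees transmits cos^2(60°) = 0.25 of the
# incident intensity, so 100 units in gives roughly 25 units out.
import math

print(100.0 * math.cos(math.radians(60)) ** 2)  # ≈ 25.0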
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
def UpperCamelCase ( numbers ) ->int:
    """simple docstring"""
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("numbers must be an iterable of integers" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
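# Usage check for the routine above (called by its name as defined in this file):
# for [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6, and for [-2, 0, -1]
# the answer is 0, since the subarray must be contiguous and the zero splits the
# two negatives apart.
print(UpperCamelCase([2, 3, -2, 4]))  # 6
print(UpperCamelCase([-2, 0, -1]))    # 0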
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class snake_case :
a_ : Union[str, Any] = 200
a_ : str = {'''Content-Length''': '''100'''}
a_ : Any = {}
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Optional[Any]:
return [bytes(__snake_case , "utf-8")]
def UpperCamelCase ( *args , **kwargs ) ->MockResponse:
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
import requests
monkeypatch.setattr(__lowerCAmelCase , "request" , __lowerCAmelCase )
a_ = URL
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a_ = url
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a_ = [url]
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a_ = {"train": url}
a_ = "dummy"
a_ = "downloads"
a_ = tmp_path
a_ = DownloadConfig(
cache_dir=os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , use_etag=__lowerCAmelCase , )
a_ = DownloadManager(dataset_name=__lowerCAmelCase , download_config=__lowerCAmelCase )
a_ = dl_manager.download(__lowerCAmelCase )
a_ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a_ = [downloaded_paths]
a_ = [urls]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
a_ = downloaded_paths.values()
a_ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
a_ = Path(__lowerCAmelCase )
a_ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
a_ = downloaded_path.read_text()
assert content == CONTENT
a_ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
a_ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = str(__lowerCAmelCase )
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a_ = filename
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a_ = [filename]
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a_ = {"train": filename}
a_ = "dummy"
a_ = xz_file.parent
a_ = "extracted"
a_ = DownloadConfig(
cache_dir=__lowerCAmelCase , use_etag=__lowerCAmelCase , )
a_ = DownloadManager(dataset_name=__lowerCAmelCase , download_config=__lowerCAmelCase )
a_ = dl_manager.extract(__lowerCAmelCase )
a_ = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a_ = [extracted_paths]
a_ = [paths]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
assert "train" in extracted_paths.keys()
a_ = extracted_paths.values()
a_ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
a_ = Path(__lowerCAmelCase )
a_ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCAmelCase , etag=__lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
a_ = extracted_path.read_text()
a_ = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
assert path.endswith(".jsonl" )
for num_items, line in enumerate(__lowerCAmelCase , start=1 ):
a_ = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
a_ = request.getfixturevalue(__lowerCAmelCase )
a_ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCAmelCase ) , start=1 ):
_test_jsonl(__lowerCAmelCase , __lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = request.getfixturevalue(__lowerCAmelCase )
a_ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCAmelCase ) , start=1 ):
_test_jsonl(__lowerCAmelCase , __lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCAmelCase ) , start=1 ):
assert os.path.basename(__lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 350
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( model_card_dir , src_lang , tgt_lang , model_name ) ->List[Any]:
    """simple docstring"""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(F'''Generating {path}''' )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = """speech_to_text_2"""
a_ : Tuple = ["""past_key_values"""]
a_ : List[str] = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=1_00_00 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=10_24 , **kwargs , ) ->Any:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
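# Hedged sketch: transformers exposes the config above as Speech2Text2Config; the
# printed attributes echo the defaults visible in the signature (d_model 256, 6
# decoder layers, embedding scaling enabled).
from transformers import Speech2Text2Config

s2t2_config = Speech2Text2Config()
print(s2t2_config.d_model, s2t2_config.decoder_layers, s2t2_config.scale_embedding)  # 256 6 True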
| 351
|
"""simple docstring"""
def binary_exponentiation( a , n , mod ) ->int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
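# Worked instance of the Fermat-inverse trick above with tiny numbers: for p = 7 and
# b = 3, b**(p-2) = 3**5 = 243 ≡ 5 (mod 7), and indeed 3 * 5 = 15 ≡ 1 (mod 7), so 5
# is the modular inverse of 3. Python's built-in pow uses the same O(log n) squaring.
print(pow(3, 5, 7), (3 * pow(3, 5, 7)) % 7)  # 5 1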
| 303
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Dict = DDIMPipeline
a_ : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a_ : Tuple = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
a_ : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
a_ : Union[str, Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
torch.manual_seed(0)
a_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
a_ = DDIMScheduler()
a_ = {"unet": unet, "scheduler": scheduler}
return components
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0) ->str:
if str(__UpperCAmelCase).startswith("mps"):
a_ = torch.manual_seed(__UpperCAmelCase)
else:
a_ = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
a_ = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = "cpu"
a_ = self.get_dummy_components()
a_ = self.pipeline_class(**__UpperCAmelCase)
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = self.get_dummy_inputs(__UpperCAmelCase)
a_ = pipe(**__UpperCAmelCase).images
a_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3))
a_ = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04])
a_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3)
def UpperCAmelCase__ ( self) ->str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def UpperCAmelCase__ ( self) ->Dict:
super().test_save_load_local(expected_max_difference=3E-3)
def UpperCAmelCase__ ( self) ->List[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Tuple:
a_ = "google/ddpm-cifar10-32"
a_ = UNetaDModel.from_pretrained(__UpperCAmelCase)
a_ = DDIMScheduler()
a_ = DDIMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase)
ddim.to(__UpperCAmelCase)
ddim.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = torch.manual_seed(0)
a_ = ddim(generator=__UpperCAmelCase , eta=0.0 , output_type="numpy").images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ = np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase__ ( self) ->Dict:
a_ = "google/ddpm-ema-bedroom-256"
a_ = UNetaDModel.from_pretrained(__UpperCAmelCase)
a_ = DDIMScheduler.from_pretrained(__UpperCAmelCase)
a_ = DDIMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase)
ddpm.to(__UpperCAmelCase)
ddpm.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = torch.manual_seed(0)
a_ = ddpm(generator=__UpperCAmelCase , output_type="numpy").images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
a_ = np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 352
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 0
|
from __future__ import annotations
import numpy as np
def relu( vector ) ->np.ndarray:
    """simple docstring"""
    # element-wise max(0, x); np.maximum broadcasts over arrays of any shape
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
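# The same ReLU applied to a 2-D input, showing that np.maximum handles any shape:
print(relu([[-3.5, 2.0], [0.0, -0.1]]))  # [[0. 2.] [0. 0.]]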
| 353
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
        print(F'''Found {torch.cuda.device_count()} devices.''')
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
    @require_multi_gpu
    def UpperCAmelCase__ ( self) ->str:
        print(F'''Found {torch.cuda.device_count()} devices.''')
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(F'''Command: {cmd}''')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
    @require_multi_gpu
    def UpperCAmelCase__ ( self) ->Optional[int]:
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
    @require_multi_gpu
    def UpperCAmelCase__ ( self) ->List[Any]:
        print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
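# Launch sketch (the file name is an assumption, not part of the original script):
#   torchrun --nproc_per_node=2 test_multigpu.py
# Each rank builds a tensor with a different dim-0 length (process_index + 2), so
# pad_across_processes must zero-pad on the right by default, or on the left when
# pad_first=True, before the shape checks above can pass on every process.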
| 303
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
UpperCamelCase_ = get_args()
main(args)
| 354
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """
    Shortest path on a 0/1 grid (1 = traversable) from `source` to `destination`,
    returning (distance, path); returns (inf, []) when the destination is unreachable.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
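    # A minimal usage sketch (the demo grid is an assumption, not part of the
    # original module): every cell is traversable (1), so the diagonal path wins.
    demo_grid = np.ones((3, 3), dtype=int)
    print(dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=True))
    # -> (2.0, [(0, 0), (1, 1), (2, 2)])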
| 303
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 355
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
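# A minimal usage sketch (shapes are assumptions, not part of the original file):
#   watermarker = StableDiffusionXLWatermarker()
#   sample = torch.rand(1, 3, 256, 256) * 2 - 1        # NCHW in [-1, 1]
#   watermarked = watermarker.apply_watermark(sample)  # same shape, bits embedded via DWT-DCT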
| 303
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
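# A minimal usage sketch (an assumption, not part of the original file):
#   config = GPTNeoXJapaneseConfig()  # 2.7b-style defaults
#   small = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)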
| 356
|
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """
    Expected number of distinct colours among `num_picked` balls drawn from an urn
    of NUM_COLOURS colours with BALLS_PER_COLOUR balls of each colour.
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
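# Derivation sketch: by linearity of expectation,
#   E[number of colours seen] = NUM_COLOURS * P(a given colour appears at least once)
#                             = 7 * (1 - C(60, 20) / C(70, 20))
# which is exactly what solution() computes; with the defaults it prints 6.818741802.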
| 303
| 0
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count set bits by repeatedly clearing the lowest set bit.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(58)
    4
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count set bits by testing the lowest bit with % 2 and shifting right.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(58)
    4
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two bit-counting implementations against each other."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 357
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
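# A minimal usage sketch (an assumption, not part of the original file):
#   config = DeiTConfig()                 # deit-base defaults: 768 hidden, 12 layers
#   onnx_config = DeiTOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])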
| 358
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
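# Note: the _LazyModule registered above defers the heavy torch/TF imports declared
# in _import_structure until an attribute is first accessed, so importing this
# subpackage stays cheap even when both backends are installed.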
| 359
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config based on the ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[str]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = ConvNextModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->List[str]:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->str:
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(__UpperCAmelCase)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = torch.tensor([-0.0_260, -0.4_739, 0.1_911]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
a_ : Optional[Any] = (ConvNextBackbone,) if is_torch_available() else ()
a_ : List[Any] = ConvNextConfig
a_ : Dict = False
def UpperCAmelCase__ ( self) ->Any:
a_ = ConvNextModelTester(self)
| 360
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(k): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
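# Spot checks implied by the rules above (illustrative, not exhaustive):
#   "focalnet-tiny"          -> depths [2, 2, 6, 2], embed_dim 96, ImageNet-1k label set
#   "focalnet-large-lrf-fl3" -> focal_levels [3, 3, 3, 3], embed_dim 192, ImageNet-22k label set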
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
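# Worked example of the renaming rules above:
#   "layers.0.blocks.1.modulation.f.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"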
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
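# Example invocation (hypothetical script name and output folder):
#   python convert_focalnet_to_hf.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny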
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def UpperCamelCase ( UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
return input_array.reshape((input_array.size, 1) )
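# e.g. column_reshape(np.array([1, 2, 3])) -> array([[1], [2], [3]])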
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
a_ = np.nan
for i in range(UpperCAmelCase ):
a_ = features[:, labels == i]
a_ = data.mean(1 )
# Centralize the data of class i
a_ = data - column_reshape(UpperCAmelCase )
if i > 0:
# If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += np.dot(UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
a_ = np.dot(UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
a_ = features.mean(1 )
a_ = np.nan
for i in range(UpperCAmelCase ):
a_ = features[:, labels == i]
a_ = data.shape[1]
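# data.shape[1] is the number of samples in class i, used below to weight this class's scatter term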
a_ = data.mean(1 )
if i > 0:
# If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += device_data * np.dot(
column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
a_ = device_data * np.dot(
column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
if features.any():
a_ = features.mean(1 )
# Center the dataset
a_ = features - np.reshape(UpperCAmelCase , (data_mean.size, 1) )
a_ = np.dot(UpperCAmelCase , centered_data.T ) / features.shape[1]
a_ , a_ = np.linalg.eigh(UpperCAmelCase )
# Take the columns in reverse order (eigh returns eigenvalues in ascending order), then keep only the first `dimensions` columns
a_ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
a_ = np.dot(filtered_eigenvectors.T , UpperCAmelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCAmelCase )
logging.error("Dataset empty" )
raise AssertionError
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
assert classes > dimensions
# Check if features have been already loaded
if features.any():
a_ , a_ = eigh(
covariance_between_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , covariance_within_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
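# scipy.linalg.eigh(A, B) solves the generalized eigenproblem A @ v = w * B @ v; here it
# maximizes between-class scatter relative to within-class scatter (Fisher's criterion)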
a_ = eigenvectors[:, ::-1][:, :dimensions]
a_ , a_ , a_ = np.linalg.svd(UpperCAmelCase )
a_ = svd_matrix[:, 0:dimensions]
a_ = np.dot(filtered_svd_matrix.T , UpperCAmelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCAmelCase )
logging.error("Dataset empty" )
raise AssertionError
def UpperCamelCase ( ) ->None:
"""simple docstring"""
a_ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
a_ = np.array([0, 0, 0, 1, 1] )
a_ = 2
a_ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCAmelCase ) as error_info:
a_ = linear_discriminant_analysis(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if isinstance(UpperCAmelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def UpperCamelCase ( ) ->None:
"""simple docstring"""
a_ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
a_ = 2
a_ = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCAmelCase ) as error_info:
a_ = principal_component_analysis(UpperCAmelCase , UpperCAmelCase )
if not np.allclose(UpperCAmelCase , UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
"""simple docstring"""
import os
import numpy
import onnx
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = a.name
a_ = b.name
a_ = ""
a_ = ""
a_ = a == b
a_ = name_a
a_ = name_b
return res
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = list(model.graph.initializer )
a_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
a_ = inits[i].name
a_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = os.path.dirname(UpperCAmelCase )
a_ = os.path.basename(UpperCAmelCase )
a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
a_ = list(model.graph.initializer )
a_ = set()
a_ = {}
a_ = []
a_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
a_ = inits[j].data_type
a_ = numpy.prod(inits[j].dims )
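# ONNX TensorProto dtype codes: 1 = FLOAT and 6 = INT32 (4 bytes each); 7 = INT64 and 11 = DOUBLE (8 bytes each)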
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , UpperCAmelCase )
total_reduced_size += mem_size
a_ = inits[i].name
a_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
a_ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
a_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = "optimized_" + model_file_name
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
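# Usage sketch (hypothetical path; assumes the function above keeps its original name,
# e.g. remove_dup_initializers):
#   optimized = remove_dup_initializers("exported/encoder.onnx")  # writes exported/optimized_encoder.onnx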
| 303
| 0
|
"""simple docstring"""
UpperCamelCase_ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 362
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
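# Usage sketch (hypothetical corpus; assumes the class keeps its original public names,
# e.g. SentencePieceUnigramTokenizer with train_from_iterator/encode):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train_from_iterator(["hello world", "goodbye world"], vocab_size=50)
#   print(tokenizer.encode("hello world").tokens)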
| 303
| 0
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
UpperCamelCase_ = input('Enter image url: ').strip()
print(f"""Downloading image from {url} ...""")
UpperCamelCase_ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
UpperCamelCase_ = soup.find('meta', {'property': 'og:image'})['content']
UpperCamelCase_ = requests.get(image_url).content
UpperCamelCase_ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 363
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def UpperCamelCase ( repo_id , path , revision ) ->int:
"""simple docstring"""
url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
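# e.g. hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision=None)
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"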
| 303
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
UpperCamelCase_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
UpperCamelCase_ = '▁'
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[Any] = VOCAB_FILES_NAMES
a_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[str] = """left"""
a_ : Optional[Any] = XLNetTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , **__UpperCAmelCase , ) ->List[str]:
# Mask token behaves like a normal word, i.e. it includes the space before it
a_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
a_ = 3
a_ = do_lower_case
a_ = remove_space
a_ = keep_accents
a_ = vocab_file
a_ = bool(self.vocab_file)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
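# XLNet appends its special tokens at the end: "A <sep> <cls>" for one sequence, "A <sep> B <sep> <cls>" for a pair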
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]:
a_ = [self.sep_token_id]
a_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
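# e.g. a single 3-token sequence yields token_type_ids [0, 0, 0, 0, 2], where the trailing 2 marks <cls>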
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__UpperCAmelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
a_ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase):
copyfile(self.vocab_file , __UpperCAmelCase)
return (out_vocab_file,)
| 364
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
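# Rough patch count implied by the defaults above (16x16 patches, 10x10 strides, 1024 frames, 128 mel bins):
#   ((128 - 16) // 10 + 1) * ((1024 - 16) // 10 + 1) = 12 * 101 patches along frequency x time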
| 303
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class snake_case ( unittest.TestCase ):
a_ : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
a_ : Tuple = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt")
# Using `do_sample=False` to force deterministic output
a_ = text_generator("This is a test" , do_sample=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
a_ = text_generator(["This is a test", "This is a second test"])
self.assertEqual(
__UpperCAmelCase , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
a_ = text_generator("This is a test" , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
{"generated_token_ids": ANY(__UpperCAmelCase)},
{"generated_token_ids": ANY(__UpperCAmelCase)},
] , )
a_ = text_generator.model.config.eos_token_id
a_ = "<pad>"
a_ = text_generator(
["This is a test", "This is a second test"] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{"generated_token_ids": ANY(__UpperCAmelCase)},
{"generated_token_ids": ANY(__UpperCAmelCase)},
],
[
{"generated_token_ids": ANY(__UpperCAmelCase)},
{"generated_token_ids": ANY(__UpperCAmelCase)},
],
] , )
@require_tf
def UpperCAmelCase__ ( self) ->Any:
a_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf")
# Using `do_sample=False` to force deterministic output
a_ = text_generator("This is a test" , do_sample=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
a_ = text_generator(["This is a test", "This is a second test"] , do_sample=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
return text_generator, ["This is a test", "Another test"]
def UpperCAmelCase__ ( self) ->Dict:
a_ = "Hello I believe in"
a_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2")
a_ = text_generator(__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
a_ = text_generator(__UpperCAmelCase , stop_sequence=" fe")
self.assertEqual(__UpperCAmelCase , [{"generated_text": "Hello I believe in fe"}])
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = text_generator.model
a_ = text_generator.tokenizer
a_ = text_generator("This is a test")
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
a_ = text_generator("This is a test" , return_full_text=__UpperCAmelCase)
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}])
self.assertNotIn("This is a test" , outputs[0]["generated_text"])
a_ = pipeline(task="text-generation" , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase)
a_ = text_generator("This is a test")
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}])
self.assertNotIn("This is a test" , outputs[0]["generated_text"])
a_ = text_generator("This is a test" , return_full_text=__UpperCAmelCase)
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
a_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
[{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}],
[{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}],
] , )
if text_generator.tokenizer.pad_token is not None:
a_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
[{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}],
[{"generated_text": ANY(__UpperCAmelCase)}, {"generated_text": ANY(__UpperCAmelCase)}],
] , )
with self.assertRaises(__UpperCAmelCase):
a_ = text_generator("test" , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase)
with self.assertRaises(__UpperCAmelCase):
a_ = text_generator("test" , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase)
with self.assertRaises(__UpperCAmelCase):
a_ = text_generator("test" , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase)
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
a_ = text_generator("")
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase)}])
else:
with self.assertRaises((ValueError, AssertionError)):
a_ = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
a_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("This is a test" * 5_00 , max_new_tokens=20)
a_ = text_generator("This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=20)
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase):
text_generator(
"This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self) ->str:
import torch
# Classic `model_kwargs`
a_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
a_ = pipe("This is a test")
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
a_ = pipe("This is a test")
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto")
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
a_ = pipe("This is a test")
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCAmelCase__ ( self) ->Dict:
import torch
a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa)
pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
import torch
a_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa)
pipe("This is a test" , do_sample=__UpperCAmelCase , top_p=0.5)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = "Hello world"
a_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2")
if text_generator.model.framework == "tf":
a_ = logging.get_logger("transformers.generation.tf_utils")
else:
a_ = logging.get_logger("transformers.generation.utils")
a_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase) as cl:
a_ = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1)
self.assertIn(__UpperCAmelCase , cl.out)
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase) as cl:
a_ = text_generator(__UpperCAmelCase , max_new_tokens=1)
self.assertNotIn(__UpperCAmelCase , cl.out)
with CaptureLogger(__UpperCAmelCase) as cl:
a_ = text_generator(__UpperCAmelCase , max_length=10)
self.assertNotIn(__UpperCAmelCase , cl.out)
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
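# e.g. for a sequence-level task the exported ONNX graph declares dynamic axes:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})])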
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['DeiTFeatureExtractor']
UpperCamelCase_ = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
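# Note: with the _LazyModule wrapper above, the heavy submodules (e.g. modeling_deit) are only
# imported on first attribute access, keeping the initial `import transformers` cheap.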
| 366
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 0
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 367
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 303
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCamelCase_ = True
except (ImportError, AttributeError):
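# FastAPI/uvicorn are optional; fall back to a stub `Body` below so the route
# handler signatures still evaluate when the serving extras are not installed.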
UpperCamelCase_ = object
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Dict:
"""simple docstring"""
pass
UpperCamelCase_ = False
UpperCamelCase_ = logging.get_logger('transformers-cli/serving')
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(UpperCAmelCase , args.host , args.port , args.workers )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : dict
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str]
a_ : Optional[List[int]]
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Any
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@staticmethod
def UpperCAmelCase__ ( __UpperCAmelCase) ->int:
a_ = parser.add_parser(
"serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints.")
serve_parser.add_argument(
"--task" , type=__UpperCAmelCase , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
serve_parser.add_argument("--host" , type=__UpperCAmelCase , default="localhost" , help="Interface the server will listen on.")
serve_parser.add_argument("--port" , type=__UpperCAmelCase , default=88_88 , help="Port the server will listen on.")

serve_parser.add_argument("--workers" , type=__UpperCAmelCase , default=1 , help="Number of http workers")
serve_parser.add_argument("--model" , type=__UpperCAmelCase , help="Model's name or path to stored model.")
serve_parser.add_argument("--config" , type=__UpperCAmelCase , help="Model's config name or path to stored model.")
serve_parser.add_argument("--tokenizer" , type=__UpperCAmelCase , help="Tokenizer name to use.")
serve_parser.add_argument(
"--device" , type=__UpperCAmelCase , default=-1 , help="Indicate the device to run on; -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
serve_parser.set_defaults(func=__UpperCAmelCase)
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = pipeline
a_ = host
a_ = port
a_ = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\". "
"Or install FastAPI and uvicorn separately.")
else:
logger.info(F'''Serving model over {host}:{port}''')
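# One pipeline, four routes: GET / exposes the model config, while POST
# /tokenize, /detokenize and /forward cover tokenization and inference.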
a_ = FastAPI(
routes=[
APIRoute(
"/" , self.model_info , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["GET"] , ),
APIRoute(
"/tokenize" , self.tokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ),
APIRoute(
"/detokenize" , self.detokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ),
APIRoute(
"/forward" , self.forward , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ),
] , timeout=6_00 , )
def UpperCAmelCase__ ( self) ->Optional[Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers)
def UpperCAmelCase__ ( self) ->Tuple:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
def UpperCAmelCase__ ( self , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase)) ->str:
try:
a_ = self._pipeline.tokenizer.tokenize(__UpperCAmelCase)
if return_ids:
a_ = self._pipeline.tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
return ServeTokenizeResult(tokens=__UpperCAmelCase , tokens_ids=__UpperCAmelCase)
else:
return ServeTokenizeResult(tokens=__UpperCAmelCase)
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(__UpperCAmelCase)})
def UpperCAmelCase__ ( self , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , ) ->List[str]:
try:
a_ = self._pipeline.tokenizer.decode(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
return ServeDeTokenizeResult(model="" , text=__UpperCAmelCase)
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(__UpperCAmelCase)})
async def UpperCAmelCase__ ( self , __UpperCAmelCase=Body(__UpperCAmelCase , embed=__UpperCAmelCase)) ->int:
# Check we don't have empty string
if len(__UpperCAmelCase) == 0:
return ServeForwardResult(output=[] , attention=[])
try:
# Forward through the model
a_ = self._pipeline(__UpperCAmelCase)
return ServeForwardResult(output=__UpperCAmelCase)
except Exception as e:
raise HTTPException(5_00 , {"error": str(__UpperCAmelCase)})
| 368
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
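# Tracks whether we are inside the deprecated `as_target_processor` context,
# in which case __call__ delegates straight to the tokenizer.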
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call).")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
| 303
| 0
|
"""simple docstring"""
import os
import sys
UpperCamelCase_ = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase_ = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
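# torch.hub-style entry points: each wrapper simply forwards to the matching
# Auto class's from_pretrained and borrows its docstring via @add_start_docstrings.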
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Dict:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Any:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
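# Modeling symbols are imported eagerly only under TYPE_CHECKING; at runtime
# _LazyModule defers the actual import until an attribute is first accessed.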
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCamelCase_ = 50000
UpperCamelCase_ = 5000
UpperCamelCase_ , UpperCamelCase_ = os.path.split(__file__)
UpperCamelCase_ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
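# Each function below is wrapped with @get_duration, so calling it returns the
# wall-clock time taken by the access pattern it exercises.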
@get_duration
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
for i in range(UpperCAmelCase ):
a_ = dataset[i]
@get_duration
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase ):
a_ = dataset[i : i + batch_size]
@get_duration
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
with dataset.formatted_as(type=UpperCAmelCase ):
for i in range(UpperCAmelCase ):
a_ = dataset[i]
@get_duration
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
with dataset.formatted_as(type=UpperCAmelCase ):
for i in range(0 , UpperCAmelCase , UpperCAmelCase ):
a_ = dataset[i : i + batch_size]
def UpperCamelCase ( ) ->Optional[int]:
"""simple docstring"""
a_ = {"num examples": SPEED_TEST_N_EXAMPLES}
a_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
a_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
a_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
a_ = generate_example_dataset(
os.path.join(UpperCAmelCase , "dataset.arrow" ) , UpperCAmelCase , num_examples=UpperCAmelCase , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(UpperCAmelCase ) )
a_ = func(UpperCAmelCase , **UpperCAmelCase )
print("shuffling dataset" )
a_ = dataset.shuffle()
print("Second set of iterations (after shuffling)" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(UpperCAmelCase ) )
a_ = func(
UpperCAmelCase , **UpperCAmelCase )
with open(UpperCAmelCase , "wb" ) as f:
f.write(json.dumps(UpperCAmelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['PoolFormerFeatureExtractor']
UpperCamelCase_ = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 350
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F'''{src_lang}-{tgt_lang}'''
a_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, because the original authors don\'t use `sacrebleu` and measure the score on tokenized outputs, while the `transformers` score was measured with `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , "README.md" )
print(F'''Generating {path}''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) ->str:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = embeddings_size
a_ = hidden_sizes
a_ = depths
a_ = is_training
a_ = use_labels
a_ = hidden_act
a_ = num_labels
a_ = scope
a_ = len(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->str:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = TFResNetModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFResNetForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : int = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a_ : Union[str, Any] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a_ : Optional[int] = False
a_ : str = False
a_ : Any = False
a_ : List[Any] = False
a_ : Any = False
def UpperCAmelCase__ ( self) ->str:
a_ = TFResNetModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self) ->List[Any]:
return
@unittest.skip(reason="ResNet does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->int:
pass
@unittest.skip(reason="ResNet does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Tuple:
pass
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase) , expected_num_stages + 1)
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a_ = layer_type
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->Dict:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFResNetModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([-11.1_069, -9.7_877, -8.3_777])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 351
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
a_ = binary_exponentiation(UpperCAmelCase , n // 2 , UpperCAmelCase )  # floor division keeps n an int in Python 3
return (b * b) % mod
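# Illustrative sanity check (not part of the original script): the result
# should match Python's built-in three-argument pow.
assert binary_exponentiation(2, 10, 701) == pow(2, 10, 701)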
# a prime number
UpperCamelCase_ = 701
UpperCamelCase_ = 1000000000
UpperCamelCase_ = 10
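# By Fermat's little theorem, b**(p-2) % p is the modular inverse of b modulo
# the prime p, so (a / b) % p can be computed without true division
# (valid here because b divides a exactly).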
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 303
| 0
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
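# Keys are fairseq parameter prefixes, values the matching Hugging Face module
# paths; '*' stands for the transformer layer index and is filled in below.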
UpperCamelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
for attribute in key.split("." ):
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + ("." + weight_type if weight_type is not None else "")} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.feature_extractor
a_ = hf_model.adapter
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
a_ = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(UpperCAmelCase )[0].split("." )[-2]
a_ = mapped_key.replace("*" , UpperCAmelCase )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
a_ = "weight"
else:
a_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = full_name.split("adaptor." )[-1]
a_ = name.split("." )
if items[1].isdigit():
a_ = int(items[1] )
else:
a_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer norm weight was initialized from {full_name}.''' )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ , a_ = emb.weight.shape
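# Build an output projection whose weights are copied from the embedding
# matrix, mirroring fairseq's tied input/output embeddings.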
a_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
a_ = emb.weight.data
return lin_layer
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->List[str]:
"""simple docstring"""
a_ = WavaVecaConfig.from_pretrained(
UpperCAmelCase , add_adapter=UpperCAmelCase , adapter_stride=UpperCAmelCase , adapter_kernel_size=UpperCAmelCase , use_auth_token=UpperCAmelCase , output_hidden_size=UpperCAmelCase , )
a_ = MBartConfig.from_pretrained(UpperCAmelCase )
# load model
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
a_ = model[0].eval()
# load feature extractor
a_ = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , use_auth_token=UpperCAmelCase )
# set weights for wav2vec2 encoder
a_ = WavaVecaModel(UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase )
# load decoder weights
a_ = MBartForCausalLM(UpperCAmelCase )
a_ , a_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
a_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase )
a_ = False
a_ = MBartaaTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = hf_wavavec.config.to_dict()
a_ = tokenizer.pad_token_id
a_ = tokenizer.bos_token_id
a_ = tokenizer.eos_token_id
a_ = "mbart50"
a_ = "wav2vec2"
a_ = tokenizer.eos_token_id
a_ = 250_004
a_ = tokenizer.eos_token_id
a_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase )
hf_wavavec.save_pretrained(UpperCAmelCase )
feature_extractor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 352
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 0
|
import argparse
import os
import re
import packaging.version
UpperCamelCase_ = 'examples/'
UpperCamelCase_ = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
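# Each entry pairs a regex that locates the current version string with a
# template in which "VERSION" is later substituted by the release number.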
UpperCamelCase_ = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
UpperCamelCase_ = 'README.md'
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.read()
a_ , a_ = REPLACE_PATTERNS[pattern]
a_ = replace.replace("VERSION" , UpperCAmelCase )
a_ = re_pattern.sub(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , pattern="examples" )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not patch:
update_version_in_examples(UpperCAmelCase )
def UpperCamelCase ( ) ->int:
"""simple docstring"""
a_ = "🤗 Transformers currently provides the following architectures"
a_ = "1. Want to contribute a new model?"
with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
# Find the start of the list.
a_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
a_ = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCAmelCase )
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
with open(REPLACE_FILES["init"] , "r" ) as f:
a_ = f.read()
a_ = REPLACE_PATTERNS["init"][0].search(UpperCAmelCase ).groups()[0]
return packaging.version.parse(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase=False ) ->List[Any]:
"""simple docstring"""
a_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
a_ = default_version.base_version
elif patch:
a_ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
a_ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
a_ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(UpperCAmelCase ) == 0:
a_ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCAmelCase , patch=UpperCAmelCase )
def UpperCamelCase ( ) ->int:
"""simple docstring"""
a_ = get_version()
a_ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
a_ = current_version.base_version
# Check with the user we got that right.
a_ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCAmelCase ) == 0:
a_ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 353
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
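# Each process builds a tensor whose first dimension depends on its rank, so
# pad_across_processes must pad every copy to the longest length (right-padded
# by default, left-padded when pad_first=True).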
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 303
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Tuple = StableUnCLIPImgaImgPipeline
a_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
a_ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ : str = frozenset([] )
def UpperCAmelCase__ ( self ) ->Any:
a_ = 32
a_ = embedder_hidden_size
# image encoding components
a_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__UpperCAmelCase , projection_dim=__UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a_ = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase )
a_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
a_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCAmelCase , layers_per_block=1 , upcast_attention=__UpperCAmelCase , use_linear_projection=__UpperCAmelCase , )
torch.manual_seed(0 )
a_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
a_ = AutoencoderKL()
a_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=True ) ->Union[str, Any]:
if str(__UpperCAmelCase ).startswith("mps" ):
a_ = torch.manual_seed(__UpperCAmelCase )
else:
a_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if pil_image:
a_ = input_image * 0.5 + 0.5
a_ = input_image.clamp(0 , 1 )
a_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a_ = DiffusionPipeline.numpy_to_pil(__UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCAmelCase__ ( self ) ->int:
a_ = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ = self.get_dummy_components()
a_ = StableUnCLIPImgaImgPipeline(**__UpperCAmelCase )
a_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a_ = self.get_dummy_inputs(__UpperCAmelCase )
inputs.update({"image_embeds": None} )
a_ = sd_pipe(**__UpperCAmelCase ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self ) ->Optional[int]:
a_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->Any:
a_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase__ ( self ) ->Dict:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__UpperCAmelCase )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) ->Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) ->Union[str, Any]:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
a_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = torch.Generator(device="cpu" ).manual_seed(0 )
a_ = pipe(__UpperCAmelCase , "anime turle" , generator=__UpperCAmelCase , output_type="np" )
a_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->Union[str, Any]:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
a_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = torch.Generator(device="cpu" ).manual_seed(0 )
a_ = pipe(__UpperCAmelCase , "anime turle" , generator=__UpperCAmelCase , output_type="np" )
a_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__ ( self ) ->Tuple:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
a_ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = pipe(
__UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
a_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 354
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) , np.inf )
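# the source cell starts at distance 0 and every other cell at infinity; the
# predecessor grid filled in below is what the path reconstruction walks backwards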
a_ = 0
a_ = np.empty((rows, cols) , dtype=UpperCAmelCase )
a_ = None
while queue:
((a_) , (a_)) = heappop(UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase , (dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
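# Minimal usage sketch (an illustrative assumption: a 0/1 NumPy grid where 1 marks a
# passable cell, matching the `next_node == 1` check above; the positional arguments
# are (grid, source, destination, allow_diagonal)):
#
# grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
# distance, path = UpperCamelCase(grid, (0, 0), (2, 2), False)
# print(distance, path)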
| 303
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = """unispeech"""
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=1_28 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=3_20 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_00 , __UpperCAmelCase=2_56 , __UpperCAmelCase=2_56 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2_56 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) ->List[Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase)
a_ = hidden_size
a_ = feat_extract_norm
a_ = feat_extract_activation
a_ = list(__UpperCAmelCase)
a_ = list(__UpperCAmelCase)
a_ = list(__UpperCAmelCase)
a_ = conv_bias
a_ = num_conv_pos_embeddings
a_ = num_conv_pos_embedding_groups
a_ = len(self.conv_dim)
a_ = num_hidden_layers
a_ = intermediate_size
a_ = hidden_act
a_ = num_attention_heads
a_ = hidden_dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = feat_proj_dropout
a_ = final_dropout
a_ = layerdrop
a_ = layer_norm_eps
a_ = initializer_range
a_ = num_ctc_classes
a_ = vocab_size
a_ = do_stable_layer_norm
a_ = use_weighted_layer_sum
a_ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a_ = apply_spec_augment
a_ = mask_time_prob
a_ = mask_time_length
a_ = mask_time_min_masks
a_ = mask_feature_prob
a_ = mask_feature_length
a_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a_ = num_codevectors_per_group
a_ = num_codevector_groups
a_ = contrastive_logits_temperature
a_ = feat_quantizer_dropout
a_ = num_negatives
a_ = codevector_dim
a_ = proj_codevector_dim
a_ = diversity_loss_weight
# ctc loss
a_ = ctc_loss_reduction
a_ = ctc_zero_infinity
# pretraining loss
a_ = replace_prob
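# the total stride (downsampling factor) of the convolutional feature encoder is the
# product of the individual conv strides, computed by the property below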
@property
def UpperCAmelCase__ ( self) ->str:
return functools.reduce(operator.mul , self.conv_stride , 1)
| 355
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
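# Minimal usage sketch (shapes are illustrative): the encoder expects a batch of images
# in [-1, 1] laid out as (batch, channels, height, width) and at least 2_56 pixels wide,
# and returns the batch with the watermark bits embedded:
#
# watermarker = snake_case()
# images = torch.rand(1, 3, 2_56, 2_56) * 2 - 1
# watermarked = watermarker.UpperCAmelCase__(images)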
| 303
| 0
|
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCamelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 356
|
"""simple docstring"""
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( UpperCAmelCase = 20 ) ->str:
"""simple docstring"""
a_ = math.comb(NUM_BALLS , UpperCAmelCase )
a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase )
a_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 303
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
# fmt: off
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 357
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
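# e.g. the TF key "decoder/ffn_layer_norm/kernel" becomes "decoder.final_layer_norm.weight"
# once the substitutions above are applied in order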
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
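# the encoder and decoder token embeddings are tied to the shared embedding matrix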
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
| 358
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append the next tokens to input_ids and the new mask to the attention mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
a_ , a_ , a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
| 303
| 0
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase = 4_000_000 ) ->int:
"""simple docstring"""
a_ = [0, 1]
a_ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
a_ = 0
for j in range(len(UpperCAmelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
a_ = ""
if is_panoptic:
a_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
a_ , a_ = get_detr_config(UpperCAmelCase )
# load original model from torch hub
a_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F'''Converting model {model_name}...''' )
a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase ):
if is_panoptic:
a_ = "detr." + src
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# verify our conversion on an image
a_ = "coco_panoptic" if is_panoptic else "coco_detection"
a_ = DetrImageProcessor(format=UpperCAmelCase )
a_ = processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = detr(UpperCAmelCase )
a_ = model(UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_28 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) ->int:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def UpperCAmelCase__ ( self) ->List[str]:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->Dict:
a_ , a_ , a_ , a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
a_ = NezhaModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->str:
a_ = True
a_ = NezhaModel(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = NezhaForMaskedLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = NezhaForNextSentencePrediction(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = NezhaForPreTraining(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = NezhaForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = self.num_labels
a_ = NezhaForSequenceClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = self.num_labels
a_ = NezhaForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = self.num_choices
a_ = NezhaForMultipleChoice(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : int = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
a_ : str = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[Any] = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False) ->Optional[int]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
def UpperCAmelCase__ ( self) ->int:
a_ = NezhaModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Any:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->int:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = NezhaModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@slow
@require_torch_gpu
    def UpperCAmelCase__ ( self) ->int:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments, so skip it.
            # Use `continue` rather than `return`, which would also skip every later class.
            if model_class == NezhaForMultipleChoice:
                continue
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp , "bert.pt") , map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device) , inputs_dict["attention_mask"].to(torch_device))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
a_ = torch.Size((1, 6, 7_68))
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4))
@slow
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
a_ = torch.Size((1, 6, 2_11_28))
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4))
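# Usage sketch for the checkpoint exercised above (hedged: requires network
# access to the Hugging Face Hub; "sijunhe/nezha-cn-base" is the repo the
# integration tests load):
#
#   from transformers import NezhaModel
#   model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")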
| 360
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig from the conventions encoded in the model name."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}  # JSON keys arrive as strings
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key(name):
    """Map an original FocalNet state-dict key to its Hugging Face name."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
"""simple docstring"""
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: " , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors="pt" )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
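# Example invocation of this conversion script (a sketch: the script file name
# is assumed, and the checkpoint URL for "focalnet-tiny" must be reachable):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-hf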
| 303
| 0
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return TrainCommand(UpperCAmelCase )
class TrainCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser("train" , help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data" , type=str , required=True , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
        train_parser.add_argument(
            "--column_label" , type=int , default=0 , help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text" , type=int , default=1 , help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id" , type=int , default=2 , help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data" , type=str , default="" , help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split" , type=float , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
        train_parser.add_argument("--output" , type=str , default="./" , help="path to save the trained model.")
        train_parser.add_argument(
            "--task" , type=str , default="text_classification" , help="Task to train the model on.")
        train_parser.add_argument(
            "--model" , type=str , default="bert-base-uncased" , help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size" , type=int , default=32 , help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size" , type=int , default=64 , help="Batch size for validation.")
        train_parser.add_argument("--learning_rate" , type=float , default=3E-5 , help="Learning rate.")
        train_parser.add_argument("--adam_epsilon" , type=float , default=1E-08 , help="Epsilon for Adam optimizer.")
        # route parsed args to the factory function defined above
        train_parser.set_defaults(func=UpperCamelCase)
    def __init__(self , args):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output , exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F'''Loading {args.task} pipeline for {args.model}''')
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F'''Loading dataset from {args.train_data}''')
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F'''Loading validation dataset from {args.validation_data}''')
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch(self):
        raise NotImplementedError
    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
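# Example CLI usage (a sketch, assuming a tab-separated CSV with label and text
# columns and a TensorFlow backend, since run_torch is not implemented):
#
#   transformers-cli train \
#       --train_data ./train.csv \
#       --column_label 0 --column_text 1 \
#       --output ./trained_model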
| 361
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b ):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    # restore the original names before returning
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto , name , new_name ):
    """Replace every input called `name` on a node with `new_name`, recursing into subgraphs."""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto , name , new_name ):
    """Replace every use of `name` as a node input in the graph with `new_name`."""
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace ):
    """Drop duplicated initializers and rewire their consumers to the kept copy."""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # point all consumers of the removed initializer at the kept one
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path ):
    """Load an ONNX file, deduplicate identical initializer tensors, and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / float64
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
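# Minimal usage sketch (hedged: the path is a placeholder; requires the onnx
# and numpy packages). Writes "optimized_<name>" next to the input file:
#
#   optimized_path = remove_dup_initializers("path/to/model.onnx")
#   print("wrote" , optimized_path)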
| 303
| 0
|
"""simple docstring"""
from torch import nn
def get_activation(act_fn ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
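# Usage sketch: the helper maps an activation name onto a torch.nn module, e.g.
#
#   act = get_activation("swish")   # returns nn.SiLU()
#   act = get_activation("gelu")    # returns nn.GELU()
#
# Any other name raises ValueError.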
| 362
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
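# Note on the pipeline above: text is normalized (NMT cleanup, NFKC, whitespace
# collapsing, lowercasing), pre-tokenized with Metaspace/digit/punctuation
# splitters, and a Unigram model is trained over the pieces; after training the
# serialized model is patched so unknown tokens map to the "<unk>" id (2).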
| 303
| 0
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case ( unittest.TestCase ):
    def _get_tensors(self , length):
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size)
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float) / length
        return input_ids, scores
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ , a_ = self._get_tensors(5)
a_ = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10),
MaxTimeCriteria(max_time=0.1),
])
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(9)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(10)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->int:
a_ = MaxLengthCriteria(max_length=10)
a_ , a_ = self._get_tensors(5)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(9)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(10)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5)
a_ , a_ = self._get_tensors(5)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(9)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(10)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ = StoppingCriteriaList([criteria])
self.assertEqual(criteria_list.max_length , 10)
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ = self._get_tensors(5)
a_ = MaxTimeCriteria(max_time=0.1)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Any:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 10)
        with self.assertWarns(UserWarning):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 11)
a_ = validate_stopping_criteria(StoppingCriteriaList() , 11)
self.assertEqual(len(__UpperCAmelCase) , 1)
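# Usage sketch (hedged): criteria compose through StoppingCriteriaList, which
# fires as soon as any member criterion returns True.
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])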
| 363
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id , path , revision ):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
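# Worked example (a sketch): hf_hub_url(repo_id="org-name/dataset-name",
# path="filename with blanks.csv") resolves, with spaces percent-encoded by
# quote, to:
#   https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv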
| 303
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = (UniPCMultistepScheduler,)
a_ : Optional[Any] = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Tuple:
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCAmelCase)
return config
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->Dict:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ , a_ = sample, sample
for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1):
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->Union[str, Any]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , __UpperCAmelCase=None , **__UpperCAmelCase) ->str:
if scheduler is None:
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
return sample
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps"):
scheduler.set_timesteps(__UpperCAmelCase)
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.10]
a_ = dummy_past_residuals[: scheduler.config.solver_order]
a_ = scheduler.timesteps[5]
a_ = scheduler.timesteps[6]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def UpperCAmelCase__ ( self) ->List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a_ = UniPCMultistepScheduler(**self.get_scheduler_config())
a_ = self.full_loop(scheduler=__UpperCAmelCase)
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.2_464) < 1E-3
a_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
a_ = DEISMultistepScheduler.from_config(scheduler.config)
a_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
a_ = UniPCMultistepScheduler.from_config(scheduler.config)
a_ = self.full_loop(scheduler=__UpperCAmelCase)
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.2_464) < 1E-3
def UpperCAmelCase__ ( self) ->str:
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
self.check_over_configs(thresholding=__UpperCAmelCase)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
a_ = self.full_loop(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
assert not torch.isnan(__UpperCAmelCase).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self) ->List[str]:
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def UpperCAmelCase__ ( self) ->int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = self.full_loop()
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.2_464) < 1E-3
def UpperCAmelCase__ ( self) ->Any:
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.1_014) < 1E-3
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
        assert sample.dtype == torch.float16
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Optional[Any]:
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 364
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
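# Usage sketch: instantiating this config class with no arguments reproduces
# the defaults above (hidden_size=768, 12 layers, 12 heads, frequency/time
# stride 10, max_length=1024, 128 mel bins), which mirror the
# "MIT/ast-finetuned-audioset-10-10-0.4593" checkpoint in the archive map.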
| 303
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = """distilbert"""
a_ : int = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=5_12 , __UpperCAmelCase=False , __UpperCAmelCase=6 , __UpperCAmelCase=12 , __UpperCAmelCase=7_68 , __UpperCAmelCase=4 * 7_68 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.2 , __UpperCAmelCase=0 , **__UpperCAmelCase , ) ->Tuple:
a_ = vocab_size
a_ = max_position_embeddings
a_ = sinusoidal_pos_embds
a_ = n_layers
a_ = n_heads
a_ = dim
a_ = hidden_dim
a_ = dropout
a_ = attention_dropout
a_ = activation
a_ = initializer_range
a_ = qa_dropout
a_ = seq_classif_dropout
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 303
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = """ibert"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=False , __UpperCAmelCase="none" , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = quant_mode
a_ = force_dequant
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 366
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 0
|
"""simple docstring"""
def triangle_number_generator():
    """Generate the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1 , 1_000_000 ):
        yield n * (n + 1) // 2
def count_divisors(n ):
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # the remaining prime factor appears once, doubling the divisor count
        divisors_count *= 2
    return divisors_count
def solution():
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
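# Worked example for count_divisors: 28 = 2**2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28). solution() scans the
# triangle numbers n * (n + 1) // 2 until one has more than 500 divisors.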
| 367
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class TFCvtModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(model_name)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
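# A minimal sketch (illustration only, not part of the tests above) of the spatial
# arithmetic the model check relies on: each CvT stage is a strided convolution, so
# the feature-map side follows floor((size + 2*pad - kernel) / stride) + 1. The
# kernel/stride/padding triples below mirror the tester defaults.
from math import floor

def conv_output_size(size: int, kernel: int, stride: int, pad: int) -> int:
    # standard convolution output-size formula
    return floor((size + 2 * pad - kernel) / stride) + 1

size = 64  # the tester's default image_size
for kernel, stride, pad in [(7, 4, 2), (3, 2, 1), (3, 2, 1)]:
    size = conv_output_size(size, kernel, stride, pad)
print(size)  # 4: a 64x64 input shrinks 64 -> 16 -> 8 -> 4 across the three stages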
| 303
| 0
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 368
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
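# A minimal usage sketch for the processor above; the checkpoint name and the dummy
# audio are illustrative assumptions, not taken from this file.
import numpy as np

from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, text="hello world")
# feature-extractor outputs come back together with the tokenized text under "labels"
print(sorted(inputs.keys()))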
| 303
| 0
|
"""simple docstring"""
from collections import Counter
from timeit import timeit
def UpperCamelCase ( UpperCAmelCase = "" , ) ->bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def UpperCamelCase ( UpperCAmelCase = "" ) ->bool:
"""simple docstring"""
if len(UpperCAmelCase ) == 0:
return True
a_ = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
a_ = {}
for character in lower_case_input_str:
a_ = character_freq_dict.get(character , 0 ) + 1
a_ = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def UpperCamelCase ( UpperCAmelCase = "" ) ->None:
"""simple docstring"""
print("\nFor string = " , UpperCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(UpperCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(UpperCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
UpperCamelCase_ = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
UpperCamelCase_ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
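# A simplified sketch of the lazy-import pattern used by the __init__ above: heavy
# submodules are imported only when one of their symbols is first accessed. This is
# an illustrative stand-in, not the real transformers._LazyModule implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)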
| 303
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """ctrl"""
a_ : Tuple = ["""past_key_values"""]
a_ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=24_65_34 , __UpperCAmelCase=2_56 , __UpperCAmelCase=12_80 , __UpperCAmelCase=81_92 , __UpperCAmelCase=48 , __UpperCAmelCase=16 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , **__UpperCAmelCase , ) ->Tuple:
a_ = vocab_size
a_ = n_positions
a_ = n_embd
a_ = n_layer
a_ = n_head
a_ = dff
a_ = resid_pdrop
a_ = embd_pdrop
a_ = layer_norm_epsilon
a_ = initializer_range
a_ = use_cache
super().__init__(**__UpperCAmelCase)
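# A minimal instantiation sketch for the config above (the small values are chosen
# purely for illustration). The attribute_map is the point: model-specific field
# names answer to the library-wide canonical names.
from transformers import CTRLConfig

config = CTRLConfig(n_embd=256, n_layer=4, n_head=8)
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 256 4 8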
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->tuple[int, int]:
"""simple docstring"""
if b == 0:
return (1, 0)
((a_) , (a_)) = extended_euclid(UpperCAmelCase , a % b )
a_ = a // b
return (y, x - k * y)
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
((a_) , (a_)) = extended_euclid(UpperCAmelCase , UpperCAmelCase )
a_ = na * na
a_ = ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
((a_) , (a_)) = extended_euclid(UpperCAmelCase , UpperCAmelCase )
if b < 0:
a_ = (b % n + n) % n
return b
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ , a_ = invert_modulo(UpperCAmelCase , UpperCAmelCase ), invert_modulo(UpperCAmelCase , UpperCAmelCase )
a_ = na * na
a_ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
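# A worked example for the routines above, written against their original
# (un-obfuscated) names, which is an assumption about this listing: solve
#   x = 1 (mod 5) and x = 2 (mod 7).
# extended_euclid(5, 7) returns (3, -2), since 5*3 + 7*(-2) = 1. The combined
# residue is then x = 2*3*5 + 1*(-2)*7 = 30 - 14 = 16, reduced mod 5*7 = 35.
# Check: 16 % 5 == 1 and 16 % 7 == 2, so 16 is the unique solution mod 35.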
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = BartphoTokenizer
a_ : Optional[Any] = False
a_ : Dict = True
def UpperCAmelCase__ ( self) ->Optional[Any]:
super().setUp()
a_ = ["▁This", "▁is", "▁a", "▁t", "est"]
a_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase))))
a_ = {"unk_token": "<unk>"}
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''')
a_ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Tuple:
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
a_ = "This is a là test"
a_ = "This is a<unk><unk> test"
return input_text, output_text
def UpperCAmelCase__ ( self) ->int:
a_ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map)
a_ = "This is a là test"
a_ = "▁This ▁is ▁a ▁l à ▁t est".split()
a_ = tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a_ = tokens + [tokenizer.unk_token]
a_ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase)
| 350
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F'''{src_lang}-{tgt_lang}'''
a_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , "README.md" )
print(F'''Generating {path}''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 0
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCamelCase_ = HfApi()
UpperCamelCase_ = {}
# fmt: off
UpperCamelCase_ = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
UpperCamelCase_ = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
UpperCamelCase_ = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
UpperCamelCase_ = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
UpperCamelCase_ = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
UpperCamelCase_ = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
UpperCamelCase_ = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
UpperCamelCase_ = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
UpperCamelCase_ = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
UpperCamelCase_ = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
UpperCamelCase_ = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
UpperCamelCase_ = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
UpperCamelCase_ = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
UpperCamelCase_ = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
UpperCamelCase_ = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
UpperCamelCase_ = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCamelCase_ = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
UpperCamelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
UpperCamelCase_ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCamelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCamelCase_ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCamelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 351
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
a_ = binary_exponentiation(UpperCAmelCase , n // 2 , UpperCAmelCase )
return (b * b) % mod
# a prime number
UpperCamelCase_ = 701
UpperCamelCase_ = 1000000000
UpperCamelCase_ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
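# A tiny self-contained check of the Fermat-inverse trick above, with small numbers
# chosen for illustration: for a prime p and b not divisible by p, b**(p-2) mod p
# is the modular inverse of b.
p, b = 13, 5
inv = pow(b, p - 2, p)  # the built-in pow already performs fast modular exponentiation
assert (b * inv) % p == 1  # 5 * 8 == 40 == 3 * 13 + 1
print(inv)  # 8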
| 303
| 0
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
UpperCamelCase_ = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
UpperCamelCase_ = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
UpperCamelCase_ = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
def UpperCAmelCase__ ( self) ->Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False) ->Optional[int]:
a_ = spearmanr(__UpperCAmelCase , __UpperCAmelCase)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 352
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
a_ = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
a_ = model(__UpperCAmelCase)["last_hidden_state"]
a_ = tf.TensorShape((1, 10, 7_68))
self.assertEqual(output.shape , __UpperCAmelCase)
# compare the actual values for a slice.
a_ = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 353
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
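# A single-process sketch (plain torch, no accelerate) of what pad_across_processes
# does in the script above: each rank's tensor is zero-padded along dim 0 up to the
# largest size across ranks, at the end by default or at the front with pad_first.
import torch

def pad_to(tensor, target, pad_first=False):
    zeros = torch.zeros((target - tensor.shape[0], *tensor.shape[1:]), dtype=tensor.dtype)
    return torch.cat([zeros, tensor] if pad_first else [tensor, zeros], dim=0)

t = torch.ones(2, 3)
padded = pad_to(t, 4)
assert padded.shape == (4, 3) and torch.all(padded[2:] == 0)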
| 303
| 0
|
"""simple docstring"""
UpperCamelCase_ = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 354
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) , np.inf )
a_ = 0
a_ = np.empty((rows, cols) , dtype=UpperCAmelCase )
a_ = None
while queue:
((a_) , (a_)) = heappop(UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase , (dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
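# A usage sketch for the grid search above, written against an assumed original
# name `dijkstra` (the def is renamed in this listing). Cells equal to 1 are
# walkable and every step costs 1; the function returns (distance, path).
# import numpy as np
# grid = np.array([[1, 1, 1],
#                  [0, 0, 1],
#                  [1, 1, 1]])
# dist, path = dijkstra(grid, (0, 0), (2, 0), False)
# # dist == 6.0 and path walks around the blocked middle row:
# # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]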
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(image , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
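# A minimal usage sketch for the watermarker above; the class and method names are
# renamed in this listing, so `Watermarker`/`apply_watermark` below are assumptions
# standing in for the originals. Inputs are NCHW tensors in [-1, 1]; images
# narrower than 256 px are returned untouched.
# import torch
# watermarker = Watermarker()
# images = torch.rand(1, 3, 512, 512) * 2 - 1  # a fake batch for illustration
# marked = watermarker.apply_watermark(images)
# assert marked.shape == images.shape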
| 303
| 0
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __lt__( self , __UpperCAmelCase) ->List[Any]:
return self[-1] < other[-1]
def __eq__( self , __UpperCAmelCase) ->Dict:
return self[-1] == other[-1]
def UpperCamelCase ( UpperCAmelCase ) ->list:
"""simple docstring"""
a_ = []
# sort into stacks
for element in collection:
a_ = Stack([element] )
a_ = bisect_left(UpperCAmelCase , UpperCAmelCase )
if i != len(UpperCAmelCase ):
stacks[i].append(element )
else:
stacks.append(UpperCAmelCase )
# use a heap-based merge to merge stack efficiently
a_ = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCamelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase_ = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
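# A worked trace of the pile phase above for [5, 1, 4, 2, 3] (piles compare by
# their top, i.e. last, element):
#   5 -> new pile                  [5]
#   1 -> onto 5 (first top >= 1)   [5,1]
#   4 -> no top >= 4, new pile     [5,1] [4]
#   2 -> onto 4                    [5,1] [4,2]
#   3 -> no top >= 3, new pile     [5,1] [4,2] [3]
# Every pile is decreasing, so reversing each one yields sorted runs
# ([1,5], [2,4], [3]) that heapq.merge combines into [1, 2, 3, 4, 5].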
| 356
|
"""simple docstring"""
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( UpperCAmelCase = 20 ) ->str:
"""simple docstring"""
a_ = math.comb(UpperCAmelCase , UpperCAmelCase )
a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase )
a_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
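# A self-contained check of the linearity-of-expectation argument above: each of
# the 7 colours is absent from a 20-ball draw with probability C(60,20)/C(70,20),
# so the expected number of distinct colours is 7 * (1 - C(60,20)/C(70,20)).
from math import comb

expected = 7 * (1 - comb(60, 20) / comb(70, 20))
print(f"{expected:.9f}")  # 6.818741802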
| 303
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=4_00 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 2_55 , __UpperCAmelCase=True , ) ->List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_normalize
a_ = image_mean
a_ = image_std
a_ = do_rescale
a_ = rescale_factor
a_ = do_pad
def UpperCAmelCase__ ( self) ->str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False) ->Dict:
if not batched:
a_ = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image):
a_ , a_ = image.size
else:
a_ , a_ = image.shape[1], image.shape[2]
if w < h:
a_ = int(self.size["shortest_edge"] * h / w)
a_ = self.size["shortest_edge"]
elif w > h:
a_ = self.size["shortest_edge"]
a_ = int(self.size["shortest_edge"] * w / h)
else:
a_ = self.size["shortest_edge"]
a_ = self.size["shortest_edge"]
else:
a_ = []
for image in image_inputs:
a_ , a_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
a_ = max(__UpperCAmelCase , key=lambda item: item[0])[0]
a_ = max(__UpperCAmelCase , key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[Any] = DetaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self) ->Any:
a_ = DetaImageProcessingTester(self)
@property
def UpperCAmelCase__ ( self) ->Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self) ->int:
a_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean"))
self.assertTrue(hasattr(__UpperCAmelCase , "image_std"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_rescale"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_pad"))
self.assertTrue(hasattr(__UpperCAmelCase , "size"))
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33})
self.assertEqual(image_processor.do_pad , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
pass
def UpperCAmelCase__ ( self) ->Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(image , Image.Image)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase)
a_ = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self) ->int:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(image , np.ndarray)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self) ->Optional[int]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self) ->str:
# prepare image and target
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r") as f:
a_ = json.loads(f.read())
a_ = {"image_id": 3_97_69, "annotations": target}
# encode them
a_ = DetaImageProcessor()
a_ = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors="pt")
# verify pixel values
a_ = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4))
# verify area
a_ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase))
# verify boxes
a_ = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3))
# verify image_id
a_ = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase))
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase))
# verify class_labels
a_ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase))
# verify orig_size
a_ = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase))
# verify size
a_ = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase))
@slow
def UpperCAmelCase__ ( self) ->Tuple:
# prepare image, target and masks_path
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r") as f:
a_ = json.loads(f.read())
a_ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
a_ = DetaImageProcessor(format="coco_panoptic")
a_ = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors="pt")
# verify pixel values
a_ = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4))
# verify area
a_ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase))
# verify boxes
a_ = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3))
# verify image_id
a_ = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase))
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase))
# verify class_labels
a_ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase))
# verify masks
a_ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCAmelCase)
# verify orig_size
a_ = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase))
# verify size
a_ = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase))
| 357
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(pegasus_name , hf_name )
return k
def convert_pegasus(tf_weights, cfg_updates) ->PegasusForConditionalGeneration:
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """simple docstring"""
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
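# Usage sketch: the converter can be driven from Python as well as from the
# CLI. The checkpoint path is get_tf_weights_as_numpy's own default; the
# script filename below is an assumption.
#
#     convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "pegasus/aeslc")
#
# or, from the shell:
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc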
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) ->list:
    """simple docstring"""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
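    # a few extra edge cases in the same spirit as the asserts above:
    # empty input, duplicates, and an already-sorted list
    assert strand_sort([]) == []
    assert strand_sort([1, 1, 3, 2]) == [1, 1, 2, 3]
    assert strand_sort([1, 2, 3]) == [1, 2, 3]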
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
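# Hedged sketch of what these checkpoints are typically combined into: a
# seq2seq EncoderDecoderModel, mirroring the model card's example rather than
# anything the tests above assert.
#
#     from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel
#
#     encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#     decoder = BertGenerationDecoder.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
#     )
#     model = EncoderDecoderModel(encoder=encoder, decoder=decoder)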
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def get_detr_config(model_name):
    """simple docstring"""
    # initialize the backbone config from the matching ResNet checkpoint
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)
# load original model from torch hub
a_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F'''Converting model {model_name}...''' )
a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase ):
if is_panoptic:
a_ = "detr." + src
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# verify our conversion on an image
a_ = "coco_panoptic" if is_panoptic else "coco_detection"
a_ = DetrImageProcessor(format=UpperCAmelCase )
a_ = processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = detr(UpperCAmelCase )
a_ = model(UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
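# After a run with --pytorch_dump_folder_path, the dump folder reloads like
# any other checkpoint; a short sketch (the local path is an assumption):
#
#     from transformers import DetrForObjectDetection, DetrImageProcessor
#
#     model = DetrForObjectDetection.from_pretrained("./detr-resnet-50")
#     processor = DetrImageProcessor.from_pretrained("./detr-resnet-50")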
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCamelCase_ = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) ->str:
return F'''{self.framework}-transfromers-test'''
@property
    def test_path(self) ->str:
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri(self) ->str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
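# Hedged sketch of how the class-scoped fixture is consumed: a test class
# sets `framework`, requests the fixture, and reads the attached environment
# from `self.env`. The test body is illustrative only.
#
#     @pytest.mark.usefixtures("sm_env")
#     class TestSageMakerPyTorch:
#         framework = "pytorch"
#
#         def test_metric_definitions(self):
#             assert self.env.metric_definitions[0]["Name"] == "train_runtime"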
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """simple docstring"""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
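# Reload sketch after conversion (the local dump folder is an assumption):
#
#     from transformers import BitImageProcessor, FocalNetForImageClassification
#
#     model = FocalNetForImageClassification.from_pretrained("./focalnet-tiny")
#     processor = BitImageProcessor.from_pretrained("./focalnet-tiny")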
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=1 , ) ->Any:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
a_ = q_groups
a_ = k_groups
a_ = v_groups
a_ = post_attention_groups
a_ = intermediate_groups
a_ = output_groups
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self) ->int:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = SqueezeBertModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = SqueezeBertForMaskedLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]:
a_ = SqueezeBertForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple:
a_ = self.num_labels
a_ = SqueezeBertForSequenceClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = self.num_labels
a_ = SqueezeBertForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
a_ = self.num_choices
a_ = SqueezeBertForMultipleChoice(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self) ->Any:
a_ = self.prepare_config_and_inputs()
((a_) , (a_) , (a_) , (a_) , (a_) , (a_)) = config_and_inputs
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
a_ : Union[str, Any] = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : List[Any] = False
a_ : Tuple = True
a_ : Optional[int] = False
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = SqueezeBertModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , dim=37)
def UpperCAmelCase__ ( self) ->Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Any:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->int:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = SqueezeBertModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
a_ = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size((1, 3))
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor([[0.6_401, -0.0_349, -0.6_041]])
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4))
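# End-to-end sketch of the same MNLI checkpoint, going through the tokenizer
# instead of hand-built input ids; the premise/hypothesis pair is arbitrary.
#
#     from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer
#
#     tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#     model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#     inputs = tokenizer("A soccer game.", "Someone is playing a sport.", return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 3), one logit per MNLI class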
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
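# Usage sketch (the file path is an assumption): deduplicate shared
# initializers in a saved ONNX file. The helper writes `optimized_<name>`
# next to the original and returns the new path.
#
#     optimized_path = remove_dup_initializers("./onnx/encoder.onnx")
#     # -> "./onnx/optimized_encoder.onnx"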
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) ->str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(self, replacement="▁", add_prefix_space=True, unk_token="<unk>", eos_token="</s>", pad_token="<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files, vocab_size = 80_00, show_progress = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator, vocab_size = 80_00, show_progress = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
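# Training sketch (the corpus path and vocab size are illustrative):
#
#     tok = SentencePieceUnigramTokenizer()
#     tok.train(["./data/corpus.txt"], vocab_size=8000)
#     tok.save("./tokenizer.json")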
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def is_chinese(word):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
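# Run sketch (paths are the script's own defaults; the script filename is an
# assumption):
#
#     python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt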
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'''
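# Concrete values for one parametrization above (revision "v2", a filename
# containing blanks):
#
#     hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#     # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"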