| code (string, lengths 87 to 55.2k) | code_codestyle (int64, 0 to 349) | style_context (string, lengths 135 to 49.1k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""
Project Euler Problem 94: https://projecteuler.net/problem=94

Find the sum of the perimeters of all almost equilateral triangles (two equal sides,
the third differing by no more than one unit) with integral side lengths and integral
area, whose perimeters do not exceed one billion.
"""


def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of the qualifying perimeters up to ``max_perimeter``."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Consecutive solutions alternate between the (a, a, a + 1) and
        # (a, a, a - 1) triangle families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return the indices of the two numbers in the sorted list ``nums`` that
    add up to ``target``, or an empty list if no such pair exists."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowerCamelCase ( _UpperCAmelCase ):
@require_torch
def snake_case_ (self ) -> Tuple:
UpperCamelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCamelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCamelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCamelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(UpperCAmelCase_ )
BertModel.from_pretrained(UpperCAmelCase_ )
BertTokenizer.from_pretrained(UpperCAmelCase_ )
pipeline(task="fill-mask" , model=UpperCAmelCase_ )
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCamelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase = "1"
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def snake_case_ (self ) -> List[str]:
UpperCamelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCamelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCamelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCamelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(UpperCAmelCase_ )
BertModel.from_pretrained(UpperCAmelCase_ )
BertTokenizer.from_pretrained(UpperCAmelCase_ )
pipeline(task="fill-mask" , model=UpperCAmelCase_ )
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCamelCase = self.get_env()
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def snake_case_ (self ) -> Any:
UpperCamelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
UpperCamelCase = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
UpperCamelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCamelCase = self.get_env()
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
UpperCamelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase = "1"
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def snake_case_ (self ) -> int:
UpperCamelCase = "\nfrom transformers import pipeline\n "
UpperCamelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
UpperCamelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
UpperCamelCase = self.get_env()
UpperCamelCase = "1"
UpperCamelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = "\nfrom transformers import AutoModel\n "
UpperCamelCase = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
UpperCamelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCamelCase = self.get_env()
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase = "1"
UpperCamelCase = subprocess.run(UpperCAmelCase_ , env=UpperCAmelCase_ , check=UpperCAmelCase_ , capture_output=UpperCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss).
            If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
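A minimal usage sketch for the backbone defined above (an illustrative addition; it assumes network access to the doc-example checkpoint named in this file, and the printed shape is the file's own _EXPECTED_OUTPUT_SHAPE):

import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1Model

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1Model.from_pretrained("google/mobilenet_v1_1.0_224")

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 1024, 7, 7])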
"""simple docstring"""
def _snake_case ( UpperCamelCase : str ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
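Usage sketch (an editorial addition): in the first demo graph the only edges whose removal disconnects the graph are (2, 3), (3, 4) and (2, 5).

print(sorted(compute_bridges(__get_demo_graph(0))))  # [(2, 3), (2, 5), (3, 4)]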
def merge_sort(collection: list) -> list:
    """Sort a collection in ascending order using merge sort.

    :param collection: a mutable ordered collection of comparable items
    :return: the collection sorted in ascending order
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
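A non-interactive check (an editorial addition):

assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []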
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__a = False
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
return 12
@property
def lowerCAmelCase_ ( self: str ) -> List[Any]:
return 12
@property
def lowerCAmelCase_ ( self: str ) -> Tuple:
return 32
@property
def lowerCAmelCase_ ( self: Any ) -> Dict:
torch.manual_seed(0 )
snake_case_ :Tuple = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
torch.manual_seed(0 )
snake_case_ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_ )
@property
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
torch.manual_seed(0 )
snake_case_ :Any = 12
snake_case_ :Dict = 12
snake_case_ :Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ :Dict = TransformeraDModel(**UpperCAmelCase_ )
return model
def lowerCAmelCase_ ( self: Union[str, Any] ) -> int:
snake_case_ :Tuple = """cpu"""
snake_case_ :Dict = self.dummy_vqvae
snake_case_ :int = self.dummy_text_encoder
snake_case_ :List[str] = self.dummy_tokenizer
snake_case_ :int = self.dummy_transformer
snake_case_ :List[Any] = VQDiffusionScheduler(self.num_embed )
snake_case_ :List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_ )
snake_case_ :List[str] = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
snake_case_ :int = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ :Tuple = """teddy bear playing in the pool"""
snake_case_ :Any = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
snake_case_ :Tuple = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""np""" )
snake_case_ :int = output.images
snake_case_ :Any = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
snake_case_ :Tuple = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type="""np""" , return_dict=UpperCAmelCase_ , num_inference_steps=2 )[0]
snake_case_ :List[str] = image[0, -3:, -3:, -1]
snake_case_ :int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ :List[str] = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case_ :Any = """cpu"""
snake_case_ :Optional[int] = self.dummy_vqvae
snake_case_ :int = self.dummy_text_encoder
snake_case_ :Union[str, Any] = self.dummy_tokenizer
snake_case_ :str = self.dummy_transformer
snake_case_ :str = VQDiffusionScheduler(self.num_embed )
snake_case_ :List[str] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ :Optional[int] = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
snake_case_ :Union[str, Any] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ :Optional[Any] = """teddy bear playing in the pool"""
snake_case_ :List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
snake_case_ :List[str] = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""np""" )
snake_case_ :Union[str, Any] = output.images
snake_case_ :List[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
snake_case_ :str = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type="""np""" , return_dict=UpperCAmelCase_ , num_inference_steps=2 )[0]
snake_case_ :Optional[int] = image[0, -3:, -3:, -1]
snake_case_ :str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ :Union[str, Any] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: int ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
snake_case_ :str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ :Optional[int] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ :Optional[int] = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ :int = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
snake_case_ :Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ :Dict = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
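A small worked example of padding_tensor as reconstructed above (an editorial addition): right-padding two label sequences to length 4 with -1 fills the trailing positions.

assert padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]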
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    # replace at most `occurrence` occurrences of `old` in `s` with `new`,
    # starting from the right-hand side of the string
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # sum all parameter values, skipping the encoder embeddings
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
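How rreplace behaves (an editorial addition): only the last occurrence is rewritten, which is what the .w/.b suffix renames above rely on.

assert rreplace("a.w.b.w", ".w", ".weight", 1) == "a.w.b.weight"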
"""Convert Reformer checkpoint."""


import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(P, goal) // t


def heuristic_1(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
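For intuition about the priorities (an editorial addition; a sketch, not meant to run after the sys.exit() above): with W1 = 1 and g_function[start] = 0, the start node's priority in each queue is just the corresponding heuristic value.

print(heuristic_1((0, 0), (19, 19)))  # 38 (Manhattan distance)
print(round(float(consistent_heuristic((0, 0), (19, 19))), 2))  # 26.87 (Euclidean distance)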
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """np"""
_SCREAMING_SNAKE_CASE = sd_pipe(**UpperCAmelCase_ ).frames
_SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
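# Illustrative sketch (not part of the test suite): how the slow tests above map to
# end-user inference with diffusers. The model id and scheduler swap mirror the tests;
# `enable_model_cpu_offload` is an optional memory-saving assumption, not a requirement.
#
#   import torch
#   from diffusers import TextToVideoSDPipeline, DPMSolverMultistepScheduler
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   pipe.enable_model_cpu_offload()
#   frames = pipe("Spiderman is surfing", num_inference_steps=25).frames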
| 306
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
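# Illustrative usage sketch (the checkpoint name matches the one the tests use; the
# audio array is a placeholder): the processor bundles the Roberta tokenizer and the
# Clap feature extractor exercised above behind one call.
#
#   from transformers import ClapProcessor
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[audio_array], return_tensors="pt")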
| 220
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        """Position ids should keep `padding_idx` for pad tokens and count up from `padding_idx + 1` otherwise."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Without input ids, sequential position ids start right after the padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
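# Minimal sketch of the padding-aware position-id rule the two embedding tests above
# verify (an illustrative re-derivation, not a claim about the library's exact source):
# pad tokens keep `padding_idx`, real tokens count up from `padding_idx + 1`.
def _position_ids_sketch(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()          # 1 for real tokens, 0 for padding
    incremental_indices = torch.cumsum(mask, dim=1) * mask  # 1, 2, 3, ... only on real tokens
    return incremental_indices.long() + padding_idx  # shift so positions start after padding_idx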
| 306
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config')
        text_config = config_inputs_dict.pop('text_config')
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=True, text_from_pt=True, )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=True, text_from_pt=True, )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 275
|
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    # write num - 1 as 2**t * s with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
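# Worked example of the decomposition used above (illustrative comment only):
# for num = 97, num - 1 = 96 = 2**5 * 3, so s = 3 and t = 5. A witness `a` passes the
# round if pow(a, 3, 97) == 1 or repeated squaring reaches 96 before t - 1 steps.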
def is_prime_low_num(num: int) -> bool:
    """Screen against small primes first, then fall back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime of roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 306
| 0
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
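# Minimal usage sketch for the reader tokenizer documented above (illustrative; the
# checkpoint name is the standard DPR reader, the question/title/text triple is a toy):
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs)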
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs, ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'''There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.''')
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4, ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int, ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 41
|
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in a non-negative integer's binary form
    using Brian Kernighan's trick.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")
    count = 0
    while number:
        # number & (number - 1) clears the lowest set bit, so instead of
        # looping through each bit and checking for 1s the
        # loop won't run 32 times, it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
def sylvester(number: int) -> int:
    """Return the nth number in Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
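# Sanity check of the recurrence (illustrative comment only): sylvester(1) = 2, and each
# later term is (prev - 1) * prev + 1, giving 3, 7, 43, 1807, ...
# e.g. sylvester(4) = (7 - 1) * 7 + 1 = 43.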
| 327
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
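# Usage note (illustrative): with the `_LazyModule` registration above, submodules are
# only imported on first attribute access, so e.g.
#   from transformers.models.altclip import AltCLIPModel
# defers loading the torch-backed modeling code until AltCLIPModel is actually touched.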
| 306
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        # by default, use DETR's shortest-edge resizing configuration
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after DETR's shortest-edge resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
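# Worked example of the resize rule above (illustrative numbers only): a 30 x 400
# (h x w) image with shortest_edge=18 has w > h, so expected_height = 18 and
# expected_width = int(18 * 400 / 30) = 240; in the batched case, padding later aligns
# every image in the batch to the per-batch maximum height and width.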
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_rescale"""))
        self.assertTrue(hasattr(image_processing, """rescale_factor"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_pad"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42, """longest_edge""": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
__lowerCamelCase = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__lowerCamelCase = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
__lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
__lowerCamelCase = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
__lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
__lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify orig_size
__lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
__lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
__lowerCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__lowerCamelCase = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__lowerCamelCase = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
__lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
__lowerCamelCase = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
__lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
__lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify masks
__lowerCamelCase = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase_ )
# verify orig_size
__lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
__lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
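# A minimal sketch (not part of the test suite) of the resize rule these tests
# exercise through ``get_expected_values``: DETR-style processors scale the
# shorter side to ``shortest_edge`` while capping the longer side at
# ``longest_edge``. The helper name ``expected_detr_size`` is hypothetical; the
# exact rounding in the library may differ slightly.
def expected_detr_size(height, width, shortest_edge=800, longest_edge=1333):
    scale = shortest_edge / min(height, width)
    if max(height, width) * scale > longest_edge:
        scale = longest_edge / max(height, width)
    return int(height * scale), int(width * scale)
# e.g. the 480x640 COCO image above becomes 800x1066, matching the asserted
# pixel_values shape (1, 3, 800, 1066) and the "size" target [800, 1066].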
| 12
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
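# Hedged illustration (token ids invented for the demo, not from the real
# vocabulary) of how set_src_lang_special_tokens arranges the language code:
# after EOS in legacy mode, as a leading prefix token otherwise.
if __name__ == "__main__":
    eos_id, lang_code_id, token_ids = 2, 256047, [100, 101, 102]
    print(token_ids + [eos_id, lang_code_id])     # legacy_behaviour=True
    print([lang_code_id] + token_ids + [eos_id])  # default behaviour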
| 306
| 0
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
__SCREAMING_SNAKE_CASE = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
__SCREAMING_SNAKE_CASE = input_file.read()
__SCREAMING_SNAKE_CASE = regexp.search(UpperCAmelCase_ )
return match
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
__SCREAMING_SNAKE_CASE = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
__SCREAMING_SNAKE_CASE = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__SCREAMING_SNAKE_CASE = regexp.finditer(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Path("""./datasets""" )
__SCREAMING_SNAKE_CASE = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase_ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Path("""./datasets""" )
__SCREAMING_SNAKE_CASE = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase_ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
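# Ad-hoc demo of the encoding regex used above (not a test case; run this
# module directly to try it). The sample strings are made up.
if __name__ == "__main__":
    demo_re = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    print(demo_re.search(" open('data.txt')"))                    # match -> flagged
    print(demo_re.search(" open('data.txt', encoding='utf-8')"))  # None -> accepted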
| 267
|
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method,
    returning the approximated value and the full table of computations."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
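# Worked example (points chosen by hand): for samples of the line y = 2x the
# scheme reproduces the exact line value at x0.
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 5)
    print(value)  # 10.0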
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers which cannot be
    written as the sum of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    """Return the first odd composite that violates Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
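# Helper written for this note (not part of the original file) showing the
# inner check in isolation. The known answer to this problem is 5777.
def violates_conjecture(num: int) -> bool:
    # True when num cannot be written as prime + 2 * i**2
    i = 0
    while 2 * i * i <= num:
        if is_prime(num - 2 * i * i):
            return False
        i += 1
    return True
# violates_conjecture(9) is False (9 = 7 + 2 * 1**2);
# violates_conjecture(5777) is True, so solution() returns 5777.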
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
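# Rough sketch (simplified, assumption-level; not the real _LazyModule) of the
# deferred-import pattern used above: attribute access triggers the actual
# submodule import, so heavy backends load only when first used.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [names]} into {name: module}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)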
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
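# Example run (transition probabilities invented for the demo): a two-state
# chain that mostly stays in "a". Exact counts vary since transition() samples.
if __name__ == "__main__":
    demo = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", demo, 1000))  # e.g. Counter({'a': 8xx, 'b': 1xx})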
| 109
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # pixel coordinates of shape (height * width, 2): (column, row)
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Builds a ring of 20 cameras panning around the origin."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
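# Side note (illustrative numbers, not library code): the fraction math in
# get_camera_rays maps pixel coordinates to [-1, 1] and scales by tan(fov / 2).
import math

_width, _x_fov = 64, 0.7
_px = 63  # right-most pixel column
_frac = (_px / (_width - 1)) * 2 - 1        # -> 1.0 at the image edge
_ray_offset = _frac * math.tan(_x_fov / 2)  # lateral offset along the camera x axis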
| 306
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]


def solution() -> int:
    """Count the triangle words in words.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
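# Hand check from the problem statement: "SKY" -> 19 + 11 + 25 = 55, the 10th
# triangular number (10 * 11 / 2), so it counts as a triangle word:
# sum(ord(x) - 64 for x in "SKY") == 55 and 55 in TRIANGULAR_NUMBERS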
| 66
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
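# Minimal numpy stand-ins (assumptions, for illustration only) for the test
# helpers used above: ids_tensor draws random token ids, random_attention_mask
# builds a 0/1 mask whose last position is always attended.
def ids_tensor_sketch(shape, vocab_size):
    return np.random.randint(0, vocab_size, size=shape)


def random_attention_mask_sketch(shape):
    mask = np.random.randint(0, 2, size=shape)
    mask[:, -1] = 1  # ensure at least one attended token per row
    return mask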
| 306
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self , **_A ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).tokenizer
def _UpperCamelCase ( self , **_A ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor
def _UpperCamelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(UpperCAmelCase_ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = processor(images=UpperCAmelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = processor(text=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer(UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 299
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data):
        # Store the input bytestring and the initial hash values h
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate a 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message so its length is a multiple of 64 bytes (512 bits)
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Return a list of 64-byte blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Unpack a 64-byte block into 16 words and expand to 80 words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
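# Quick equivalence check (same idea as test_sha1_hash, different message):
# SHA1Hash(b"hello").final_hash() == hashlib.sha1(b"hello").hexdigest()  # True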
| 306
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowercase_ ( _UpperCAmelCase ):
'''simple docstring'''
__snake_case = "trajectory_transformer"
__snake_case = ["past_key_values"]
__snake_case = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , __UpperCAmelCase : Optional[Any]=100 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : List[str]=1 , __UpperCAmelCase : Optional[Any]=1 , __UpperCAmelCase : Tuple=249 , __UpperCAmelCase : int=6 , __UpperCAmelCase : Optional[Any]=17 , __UpperCAmelCase : Tuple=25 , __UpperCAmelCase : Union[str, Any]=4 , __UpperCAmelCase : int=4 , __UpperCAmelCase : str=128 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : List[str]=0.0006 , __UpperCAmelCase : Optional[int]=512 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : List[str]=1e-1_2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Union[str, Any]=50_256 , __UpperCAmelCase : Dict=50_256 , **__UpperCAmelCase : Dict , ) ->Tuple:
"""simple docstring"""
a = vocab_size
a = action_weight
a = reward_weight
a = value_weight
a = max_position_embeddings
a = block_size
a = action_dim
a = observation_dim
a = transition_dim
a = learning_rate
a = n_layer
a = n_head
a = n_embd
a = embd_pdrop
a = attn_pdrop
a = resid_pdrop
a = initializer_range
a = layer_norm_eps
a = kaiming_initializer_range
a = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
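# Side note (behaviour inherited from PretrainedConfig, illustrated with the
# attribute_map defined above; the upstream class is named
# TrajectoryTransformerConfig, which this dump renames):
# config = TrajectoryTransformerConfig()
# config.hidden_size == config.n_embd  # True: "hidden_size" aliases n_embd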
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
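# Hedged illustration (invented ids) of the conversation truncation implemented
# in the method above: every turn gets an EOS appended, then the history is
# clipped to the most recent model_max_length ids.
if __name__ == "__main__":
    eos_token_id, model_max_length = 2, 8
    input_ids = []
    for ids in [[5, 6], [7, 8, 9], [10, 11]]:
        input_ids.extend(ids + [eos_token_id])
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    print(input_ids)  # [2, 7, 8, 9, 2, 10, 11, 2] -- the oldest ids are dropped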
| 306
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
_UpperCamelCase : str = {'facebook/bart-base': BartForConditionalGeneration}
_UpperCamelCase : str = {'facebook/bart-base': BartTokenizer}
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=snake_case__ , default=snake_case__ , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=snake_case__ , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=snake_case__ , default=snake_case__ , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=snake_case__ , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , )
parser.add_argument(
'--config_name' , type=snake_case__ , default=snake_case__ , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=snake_case__ , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=snake_case__ , default=snake_case__ , help='Where to store the final ONNX file.' )
lowercase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE ( __snake_case : Tuple , __snake_case : List[Any]="cpu" ):
'''simple docstring'''
lowercase = model_dict[model_name].from_pretrained(snake_case__ ).to(snake_case__ )
lowercase = tokenizer_dict[model_name].from_pretrained(snake_case__ )
if model_name in ["facebook/bart-base"]:
lowercase = 0
lowercase = None
lowercase = 0
return huggingface_model, tokenizer
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : int , __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
model.eval()
lowercase = None
lowercase = torch.jit.script(BARTBeamSearchGenerator(snake_case__ ) )
with torch.no_grad():
lowercase = 'My friends are cool but they eat too many carbs.'
lowercase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors='pt' ).to(model.device )
lowercase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=snake_case__ , max_length=snake_case__ , early_stopping=snake_case__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
snake_case__ , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , snake_case__ , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=snake_case__ , )
logger.info('Model exported to {}'.format(snake_case__ ) )
lowercase = remove_dup_initializers(os.path.abspath(snake_case__ ) )
logger.info('Deduplicated and optimized model written to {}'.format(snake_case__ ) )
lowercase = onnxruntime.InferenceSession(snake_case__ )
lowercase = ort_sess.run(
snake_case__ , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(snake_case__ ),
'max_length': np.array(snake_case__ ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = parse_args()
lowercase = 5
lowercase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
lowercase = torch.device(args.device )
lowercase , lowercase = load_model_tokenizer(args.model_name_or_path , snake_case__ )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(snake_case__ )
if args.max_length:
lowercase = args.max_length
if args.num_beams:
lowercase = args.num_beams
if args.output_file_path:
lowercase = args.output_file_path
else:
lowercase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
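# Minimal follow-up sketch (paths and values assumed) for reusing the exported
# graph without the validation harness above:
# import numpy as np
# import onnxruntime
# sess = onnxruntime.InferenceSession("BART.onnx")
# out = sess.run(None, {"input_ids": ids, "attention_mask": mask,
#                       "num_beams": np.array(4), "max_length": np.array(5),
#                       "decoder_start_token_id": np.array(2)})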
| 220
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
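
# A hedged usage sketch (not part of the test suite): the tiny configuration
# exercised above can also be run standalone to verify output shapes, assuming
# TensorFlow is installed.
#
#   config = DebertaVaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
#   model = TFDebertaVaModel(config)
#   out = model(tf.ones((13, 7), dtype=tf.int32))
#   assert out.last_hidden_state.shape == (13, 7, 32)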
| 306
| 0
|
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
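
# Illustrative worked example: both builders agree on the first five rows of
# Pascal's triangle.
if __name__ == "__main__":
    expected = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
    assert generate_pascal_triangle(5) == expected
    assert generate_pascal_triangle_optimized(5) == expected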
| 275
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
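
# Illustrative sketch: what create_tensor yields per rank, mirrored without a
# distributed launch. Rank r on a world of size n holds [r*n + 1, ..., r*n + n]
# as floats, so gathering all ranks gives 1..n**2.
def _expected_local_tensor(num_processes: int, process_index: int) -> torch.Tensor:
    return torch.arange(num_processes) + 1.0 + num_processes * process_index


assert _expected_local_tensor(2, 1).tolist() == [3.0, 4.0]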
| 306
| 0
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extra arguments, on top of `TrainingArguments`, used by the seq2seq trainer."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
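
# Illustrative sketch: these arguments extend the standard TrainingArguments,
# so instantiation looks like the following (output_dir is required by the
# base class):
#
#   training_args = Seq2SeqTrainingArguments(output_dir="output", predict_with_generate=True, sortish_sampler=True)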
| 41
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then scan with two pointers."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_case_1 = """
triplet_sum1(*dataset)
"""
    test_case_2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_case_1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_case_2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
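
# Illustrative check: both strategies find the same sorted triplet.
if __name__ == "__main__":
    sample = [1, 5, 8, 2, 7]
    assert triplet_sum1(sample, 10) == (1, 2, 7)
    assert triplet_sum2(sample, 10) == (1, 2, 7)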
| 306
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
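
# Illustrative sketch: instantiating the config with a few overrides; an
# unsupported router dtype is rejected with a ValueError.
#
#   config = NllbMoeConfig(num_experts=8, router_dtype="bfloat16")
#   assert config.num_experts == 8
#   NllbMoeConfig(router_dtype="int8")  # raises ValueError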
| 327
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
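
# Illustrative sketch of end-to-end usage, assuming a SAM-style checkpoint such
# as "facebook/sam-vit-base" (the checkpoint name is an assumption, not taken
# from this file):
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
#   outputs = generator("path/to/image.png", points_per_batch=64, output_bboxes_mask=True)
#   print(len(outputs["masks"]), outputs["scores"].shape)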
| 306
| 0
|
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
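
# Illustrative check: gnome sort works in place and returns the sorted list.
if __name__ == "__main__":
    assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]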
| 12
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy/paste/tweak the old-structure prophetnet weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
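
# Illustrative invocation (the script file name is an assumption; the flags are
# the two required arguments defined above):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased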
| 306
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
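
# Illustrative note: because of the _LazyModule indirection above, a statement
# such as the following only imports the heavy modeling file when the attribute
# is first accessed:
#
#   from transformers import Wav2Vec2Config
#
#   config = Wav2Vec2Config()  # loads configuration_wav2vec2 only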
| 267
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # Assumes `nums` is sorted in non-decreasing order; returns indices.
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
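
# Illustrative check: the scan assumes a sorted input and returns indices, not
# values.
if __name__ == "__main__":
    assert two_pointer([1, 3, 4, 6, 10], 10) == [2, 3]  # 4 + 6 == 10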
| 306
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
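
# Illustrative sketch: create_pan_cameras builds 20 poses circling the origin,
# and camera_rays then yields one (origin, direction) pair per pixel.
#
#   camera = create_pan_cameras(64)
#   rays = camera.camera_rays
#   print(rays.shape)  # expected torch.Size([1, 20 * 64 * 64, 2, 3])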
| 153
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(self, config: MobileNetVaConfig, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
_SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
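# Hedged usage sketch (comments only): the class above mirrors transformers'
# MobileNetV1ForImageClassification; the checkpoint and processor names below are
# illustrative, not taken from this file.
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted_class = logits.argmax(-1).item()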
| 306
| 0
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser) -> None:
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self) -> dict:
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors
            safetensors_version = f"{safetensors.__version__} but is ignored because the installed PyTorch version is too old."
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 109
|
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort: recursively split the input in half, then merge
    the two sorted halves back together."""
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                # always yield the smaller head element so the output stays sorted
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
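# Worked example (illustrative values):
#   merge_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
#   merge_sort([])              -> []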
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 306
| 0
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    """Configuration class for MaskFormer, combining a backbone config and a DETR decoder config."""
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs) -> None:
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )
    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
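# Construction sketch (defaults come from the signature above; values are illustrative):
#   config = MaskFormerConfig()                       # Swin backbone + DETR decoder defaults
#   config = MaskFormerConfig(mask_feature_size=512)  # override a single field
#   serialized = config.to_dict()                     # nested configs are serialized recursively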
| 66
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length) -> list:
    """Pad or truncate every sequence to `sequence_length`; a tuple `padding_value`
    marks 2-D entries such as entity spans."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        # NOTE: the write-back below is a reconstruction; the original assignment
        # targets were lost, so left padding is implemented as the mirror of right padding.
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(truncated), :2] = truncated
            else:
                out_tensor[i, : len(truncated)] = truncated
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(truncated) :, :2] = truncated
            else:
                out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
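# Worked example (illustrative values, not taken from this file):
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]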
def is_punctuation(char: str) -> bool:
    """Return True for punctuation: the four ASCII punctuation ranges or Unicode category P*."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors would fail here because the labels are not yet of equal length
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
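# Hedged usage sketch (the class name follows the upstream LUKE example; the
# tokenizer instance is assumed to be a LUKE tokenizer):
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)  # features: dicts with tokenizer fields plus
#                               # "labels", "ner_tags", "original_entity_spans"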
| 306
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))
    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))
    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))
    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))
    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 299
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # LSH attention packs query and key into a single projection
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # local attention keeps separate query, key and value projections
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias))
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias))
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias))
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias))
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias))
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias))
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
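# Example invocation (the flags come from the parser above; the script name and
# file paths are illustrative placeholders):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin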
| 306
| 0
|
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total
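# Worked check (illustrative numbers):
#   sum_of_series(1, 1, 10) = 10 / 2 * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0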
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        ["num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps"]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
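# Note (a sketch of the usual convention, not stated in this file): the @slow
# tests above are skipped by default and are typically run with RUN_SLOW=1 on a
# machine with a CUDA device.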
| 306
| 0
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 220
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 306
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
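# Hedged usage sketch (the checkpoint name comes from the map above):
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world").input_ids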
| 275
|
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test using 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
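# How the decomposition above works (worked example with illustrative numbers):
#   for num = 221, num - 1 = 220 = 2**2 * 55, so t = 2 and s = 55; a witness `a`
#   proves compositeness unless a**s == 1 (mod num) or a**(s * 2**j) == num - 1
#   (mod num) for some j < t.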
def is_prime_low_num(num: int) -> bool:
    """Trial division against all primes below 1000, then fall back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime number of `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
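# Note: randrange(2 ** (keysize - 1), 2 ** keysize) guarantees the candidate has
# exactly `keysize` bits, so generate_large_prime(1024) returns a 1024-bit prime.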
| 306
| 0
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Tuple = None
if self.use_input_mask:
lowerCamelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self: List[Any] ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Dict ):
lowerCamelCase__ : Dict = EsmModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase_ )
lowerCamelCase__ : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : List[Any] = EsmForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Dict ):
lowerCamelCase__ : Optional[Any] = self.num_labels
lowerCamelCase__ : Optional[Any] = EsmForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a = False
a = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
a = ()
a = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
a = True
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = EsmModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: int ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Optional[int] = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowerCamelCase_ ( self: int ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[int] = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase__ : Optional[int] = EsmEmbeddings(config=UpperCAmelCase_ )
lowerCamelCase__ : List[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase__ : Optional[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase__ : List[Any] = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase__ : List[str] = EsmEmbeddings(config=UpperCAmelCase_ )
lowerCamelCase__ : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase__ : Optional[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase__ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase__ : str = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase_ ( self: Dict ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Any ):
pass
@require_torch
class _lowercase ( _UpperCAmelCase ):
@slow
def lowerCamelCase_ ( self: Optional[Any] ):
with torch.no_grad():
lowerCamelCase__ : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ : List[str] = model(UpperCAmelCase_ )[0]
lowerCamelCase__ : Any = 33
lowerCamelCase__ : List[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
lowerCamelCase__ : str = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Dict ):
with torch.no_grad():
lowerCamelCase__ : str = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase__ : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase__ : Dict = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
lowerCamelCase__ : Dict = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 41
|
def __lowerCamelCase ( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # Kernighan's trick: `number &= number - 1` clears the lowest set bit,
        # so instead of scanning all 32 bit positions the loop body runs once
        # per `1` bit in the input.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
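# Usage sketch for the population-count helper above; the expected values
# follow directly from the binary representations noted in the comments.
assert __lowerCamelCase(0 ) == 0
assert __lowerCamelCase(25 ) == 3   # 25 == 0b11001
assert __lowerCamelCase(255 ) == 8  # 255 == 0b11111111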
| 306
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree():
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root ):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root ):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root ):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root ):
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root ):
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root , level ):
    output = []
    def populate_output(root , level ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root , level ):
    output = []
    def populate_output(root , level ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root ):
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main():  # Main function for testing.
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , '\n' )
    print(f"""Height of Tree: {height(tree )}""" , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(tree ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(tree ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
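# For reference, the tree built by make_tree() above is
#         1
#        / \
#       2   3
#      / \
#     4   5
# so level_order(tree) returns [1, 2, 3, 4, 5] and zigzag(tree) returns
# [[1], [3, 2], [4, 5]].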
| 327
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
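# Sketch of how the `_LazyModule` indirection above behaves (an assumption
# based on the structure of this file, not a quote from the library): the
# heavy torch-backed submodule is only imported when one of its attributes is
# first accessed, so the config/processing classes stay cheap to import.
#
#     from transformers.models.altclip import AltCLIPProcessor  # no torch needed
#     from transformers.models.altclip import AltCLIPModel      # triggers the lazy torch import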
| 306
| 0
|
def lowerCamelCase__ ( string_a : str , string_b : str ):
    '''simple docstring'''
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
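# Worked example for the Hamming-distance helper above: inputs must be equal
# length, and the result counts the positions at which the characters differ.
assert lowerCamelCase__("karolin" , "kathrin" ) == 3
assert lowerCamelCase__("0000" , "1111" ) == 4
assert lowerCamelCase__("abc" , "abc" ) == 0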
| 12
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
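# Hedged usage sketch for the fast NLLB tokenizer class above (obfuscated here
# as `__UpperCAmelCase`; upstream it is `NllbTokenizerFast`). The checkpoint
# name comes from the URL maps above; downloading it is assumed to work in
# your environment.
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # With the default (non-legacy) behaviour, source text is wrapped as
#     # [src_lang_code] ... [eos]; legacy_behaviour=True moves both tokens to
#     # the suffix, per set_src_lang_special_tokens above.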
| 306
| 0
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase : int = logging.getLogger(__name__)
def accuracy( out , labels ):
    """simple docstring"""
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset( dataset_path ):
    """simple docstring"""
    with open(dataset_path , encoding="""utf_8""" ) as f:
        dataset = csv.reader(f )
        output = []
        next(dataset )  # skip the first line
        for line in tqdm(dataset ):
            output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, conta, contb, mc_label) in enumerate(dataset ):
            with_conta = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            with_contb = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_conta )] = with_conta
            input_ids[i, 1, : len(with_contb )] = with_contb
            mc_token_ids[i, 0] = len(with_conta ) - 1
            mc_token_ids[i, 1] = len(with_contb ) - 1
            lm_labels[i, 0, : len(with_conta )] = with_conta
            lm_labels[i, 1, : len(with_contb )] = with_contb
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=snake_case__ , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=snake_case__ , default="""""" )
parser.add_argument("""--eval_dataset""" , type=snake_case__ , default="""""" )
parser.add_argument("""--seed""" , type=snake_case__ , default=42 )
parser.add_argument("""--num_train_epochs""" , type=snake_case__ , default=3 )
parser.add_argument("""--train_batch_size""" , type=snake_case__ , default=8 )
parser.add_argument("""--eval_batch_size""" , type=snake_case__ , default=16 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=snake_case__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=snake_case__ , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=snake_case__ , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=snake_case__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=snake_case__ , default=6.2_5E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=snake_case__ , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=snake_case__ , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=snake_case__ , default=0.01 )
parser.add_argument("""--lm_coef""" , type=snake_case__ , default=0.9 )
parser.add_argument("""--n_valid""" , type=snake_case__ , default=3_74 )
parser.add_argument("""--server_ip""" , type=snake_case__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=snake_case__ , default="""""" , help="""Can be used for distant debugging.""" )
__SCREAMING_SNAKE_CASE = parser.parse_args()
print(snake_case__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(snake_case__ , snake_case__ ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__SCREAMING_SNAKE_CASE = ["""_start_""", """_delimiter_""", """_classify_"""]
__SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(snake_case__ )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(snake_case__ )
__SCREAMING_SNAKE_CASE = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(snake_case__ ) )
model.to(snake_case__ )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info("""Encoding dataset...""" )
__SCREAMING_SNAKE_CASE = load_rocstories_dataset(args.train_dataset )
__SCREAMING_SNAKE_CASE = load_rocstories_dataset(args.eval_dataset )
__SCREAMING_SNAKE_CASE = (train_dataset, eval_dataset)
__SCREAMING_SNAKE_CASE = tokenize_and_encode(snake_case__ )
# Compute the max input length for the Transformer
__SCREAMING_SNAKE_CASE = model.config.n_positions // 2 - 2
__SCREAMING_SNAKE_CASE = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__SCREAMING_SNAKE_CASE = min(snake_case__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__SCREAMING_SNAKE_CASE = pre_process_datasets(snake_case__ , snake_case__ , snake_case__ , *snake_case__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tensor_datasets[0], tensor_datasets[1]
__SCREAMING_SNAKE_CASE = TensorDataset(*snake_case__ )
__SCREAMING_SNAKE_CASE = RandomSampler(snake_case__ )
__SCREAMING_SNAKE_CASE = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=args.train_batch_size )
__SCREAMING_SNAKE_CASE = TensorDataset(*snake_case__ )
__SCREAMING_SNAKE_CASE = SequentialSampler(snake_case__ )
__SCREAMING_SNAKE_CASE = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__SCREAMING_SNAKE_CASE = args.max_steps
__SCREAMING_SNAKE_CASE = args.max_steps // (len(snake_case__ ) // args.gradient_accumulation_steps) + 1
else:
__SCREAMING_SNAKE_CASE = len(snake_case__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__SCREAMING_SNAKE_CASE = list(model.named_parameters() )
__SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
__SCREAMING_SNAKE_CASE = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
__SCREAMING_SNAKE_CASE = AdamW(snake_case__ , lr=args.learning_rate , eps=args.adam_epsilon )
__SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
snake_case__ , num_warmup_steps=args.warmup_steps , num_training_steps=snake_case__ )
if args.do_train:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = tqdm(snake_case__ , desc="""Training""" )
for step, batch in enumerate(snake_case__ ):
__SCREAMING_SNAKE_CASE = tuple(t.to(snake_case__ ) for t in batch )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = batch
__SCREAMING_SNAKE_CASE = model(snake_case__ , mc_token_ids=snake_case__ , lm_labels=snake_case__ , mc_labels=snake_case__ )
__SCREAMING_SNAKE_CASE = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__SCREAMING_SNAKE_CASE = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__SCREAMING_SNAKE_CASE = """Training loss: {:.2e} lr: {:.2e}""".format(snake_case__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__SCREAMING_SNAKE_CASE = model.module if hasattr(snake_case__ , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , snake_case__ )
__SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , snake_case__ )
torch.save(model_to_save.state_dict() , snake_case__ )
model_to_save.config.to_json_file(snake_case__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__SCREAMING_SNAKE_CASE = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(snake_case__ )
if args.do_eval:
model.eval()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0
for batch in tqdm(snake_case__ , desc="""Evaluating""" ):
__SCREAMING_SNAKE_CASE = tuple(t.to(snake_case__ ) for t in batch )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = batch
with torch.no_grad():
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(
snake_case__ , mc_token_ids=snake_case__ , lm_labels=snake_case__ , mc_labels=snake_case__ )
__SCREAMING_SNAKE_CASE = mc_logits.detach().cpu().numpy()
__SCREAMING_SNAKE_CASE = mc_labels.to("""cpu""" ).numpy()
__SCREAMING_SNAKE_CASE = accuracy(snake_case__ , snake_case__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__SCREAMING_SNAKE_CASE = eval_loss / nb_eval_steps
__SCREAMING_SNAKE_CASE = eval_accuracy / nb_eval_examples
__SCREAMING_SNAKE_CASE = tr_loss / nb_tr_steps if args.do_train else None
__SCREAMING_SNAKE_CASE = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
__SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , """eval_results.txt""" )
with open(snake_case__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , snake_case__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
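# Example invocation for the fine-tuning script above; every flag is defined
# in the argparse block, the script file name is illustrative, and the
# ROCStories CSV paths are placeholders for wherever the dataset lives.
#
#     python run_openai_gpt.py \
#         --model_name openai-gpt \
#         --do_train --do_eval \
#         --train_dataset path/to/cloze_test_val__spring2016.csv \
#         --eval_dataset path/to/cloze_test_test__spring2016.csv \
#         --output_dir ./gpt_rocstories \
#         --train_batch_size 8 --num_train_epochs 3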
| 267
|
def __lowerCamelCase ( x_points ,y_points ,xa ) -> list:
    """simple docstring"""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 ,n ):
        for j in range(i ,n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
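# Worked example for the iterated (Neville-style) interpolation above: the
# points (1, 6), (2, 7), (3, 8), (4, 9), (6, 11) all lie on y = x + 5, so
# interpolating at x = 5 recovers 10.0 exactly.
assert __lowerCamelCase([1, 2, 3, 4, 6] , [6, 7, 8, 9, 11] , 5 )[0] == 10.0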
| 306
| 0
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def get_dataloaders( accelerator , batch_size = 16 ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCamelCase = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
snake_case__ , padding="longest" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
UpperCamelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , snake_case__ ) == "1":
UpperCamelCase = 2
# Initialize accelerator
UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config["lr"]
UpperCamelCase = int(config["num_epochs"] )
UpperCamelCase = int(config["seed"] )
UpperCamelCase = int(config["batch_size"] )
UpperCamelCase = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(_SCREAMING_SNAKE_CASE ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case__ )
UpperCamelCase , UpperCamelCase = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase = model(**snake_case__ )
UpperCamelCase = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**snake_case__ )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=snake_case__ , default=snake_case__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
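# Conceptual sketch of what `find_executable_batch_size` adds above (an
# assumption based on the decorator's documented behaviour, not a copy of the
# accelerate implementation): the wrapped loop is retried with a halved batch
# size whenever it raises a CUDA out-of-memory error.
#
#     batch_size = starting_batch_size
#     while True:
#         try:
#             return inner_training_loop(batch_size)
#         except RuntimeError as e:      # CUDA OOM surfaces as a RuntimeError
#             if "out of memory" not in str(e):
#                 raise
#             batch_size //= 2           # shrink and retry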
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 0
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = PriorTransformer
__lowerCAmelCase : Optional[int] = "hidden_states"
@property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = 4
UpperCAmelCase : Optional[int] = 8
UpperCAmelCase : Union[str, Any] = 7
UpperCAmelCase : List[Any] = floats_tensor((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(UpperCAmelCase_ )
UpperCAmelCase : List[str] = 4
UpperCAmelCase : int = 8
UpperCAmelCase : Union[str, Any] = 7
UpperCAmelCase : List[str] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
UpperCAmelCase : List[str] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return (4, 8)
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return (4, 8)
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
UpperCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
UpperCAmelCase : Dict = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Tuple = self.model_class(**UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : List[str] = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
UpperCAmelCase : Optional[int] = model.to(UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ , """set_default_attn_processor""" ):
model.set_default_attn_processor()
UpperCAmelCase : int = self.get_dummy_seed_input()
with torch.no_grad():
UpperCAmelCase : str = model(**UpperCAmelCase_ )[0]
UpperCAmelCase : Dict = output[0, :5].flatten().cpu()
print(UpperCAmelCase_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCAmelCase : Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-2 ) )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=77 , _SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(UpperCAmelCase_ )
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = embedding_dim
UpperCAmelCase : Tuple = num_embeddings
UpperCAmelCase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
UpperCAmelCase : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Any = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(UpperCAmelCase_ )
UpperCAmelCase : List[str] = self.get_dummy_seed_input(seed=UpperCAmelCase_ )
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase_ )[0]
assert list(sample.shape ) == [1, 768]
UpperCAmelCase : Dict = sample[0, :8].flatten().cpu()
print(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
| 109
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int
__snake_case : int
__snake_case : float
__snake_case : float
__snake_case : Tuple[int]
def UpperCamelCase ( self: str ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = rays.view(UpperCAmelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(UpperCAmelCase_ , 1 , 3 )
+ self.x.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCAmelCase_ , *UpperCAmelCase_ , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """simple docstring"""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
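# Minimal usage sketch (uses only the definitions above; the resolution is
# illustrative): build the 20 pan cameras at 32x32 and inspect the resulting
# origin/direction ray bundle.
#   cameras = create_pan_cameras(32)
#   rays = cameras.camera_rays  # torch.Size([1, 20 * 32 * 32, 2, 3])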
| 306
| 0
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
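# Usage note (a sketch; assumes the legacy qiskit `Aer`/`execute` API imported
# above): starting from |0...0>, the QFT produces an equal superposition, so
# the 10000 shots should spread roughly uniformly over all 2**n outcomes, e.g.
#   quantum_fourier_transform(2)  # counts near {'00': 2500, '01': 2500,
#                                 #              '10': 2500, '11': 2500}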
| 66
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 306
| 0
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
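# Example (illustrative values): padding_tensor([[1, 2], [3]], -1, "right", 3)
# right-pads each sequence to length 3 and returns [[1, 2, -1], [3, -1, -1]].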
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 299
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate(n, b):
        '''simple docstring'''
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding(self):
        '''simple docstring'''
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data
    def split_blocks(self):
        '''simple docstring'''
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]
    def expand_block(self, block):
        '''simple docstring'''
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        '''simple docstring'''
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    """simple docstring"""
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
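# Sanity example (classic SHA-1 test vector): SHAaHash(b"abc").final_hash()
# should equal hashlib.sha1(b"abc").hexdigest(), i.e.
# "a9993e364706816aba3e25717850c26c9cd0d89d".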
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 306
| 0
|
from scipy.stats import spearmanr
import datasets
UpperCAmelCase__ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
UpperCAmelCase__ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n"
UpperCAmelCase__ = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        """simple docstring"""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 306
| 0
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 220
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 306
| 0
|
def prime_sieve_eratosthenes(num):
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
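# Example: prime_sieve_eratosthenes(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].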
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 275
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """simple docstring"""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    """simple docstring"""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'
def test_broadcast(state):
    """simple docstring"""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    """simple docstring"""
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """sum""")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state):
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """mean""")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index):
    """simple docstring"""
    main()
def main():
    """simple docstring"""
    state = PartialState()
    state.print(f'State: {state}')
    state.print("""testing gather""")
    test_gather(state)
    state.print("""testing gather_object""")
    test_gather_object(state)
    state.print("""testing broadcast""")
    test_broadcast(state)
    state.print("""testing pad_across_processes""")
    test_pad_across_processes(state)
    state.print("""testing reduce_sum""")
    test_reduce_sum(state)
    state.print("""testing reduce_mean""")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
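# Run note (a sketch; the file name is illustrative): launch this script across
# multiple workers, e.g. `accelerate launch --num_processes 2 test_ops.py`, so
# that PartialState sees more than one process; the reduce tests above only
# execute when exactly two processes are present.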
| 306
| 0
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_A : Dict =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"""image""": image, """question""": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["""image"""])
        model_inputs = self.tokenizer(
            inputs["""question"""], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
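# Usage sketch (the task alias is real; the image path is illustrative):
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)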
| 41
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-10_00, 10_00) for i in range(10)]
    r = randint(-50_00, 50_00)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
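# Example: triplet_sum2([1, 2, 3, 4, 5], 9) returns (1, 3, 5), found in
# O(n^2) by the sorted two-pointer scan above.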
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=1_00_00)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=1_00_00)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 306
| 0
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
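# Example invocation of this conversion script (the script file name and all
# paths below are illustrative placeholders, not part of the original file):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors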
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the heap position of the parent of the current node."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the heap position of the left child of the current node."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the heap position of the right child of the current node."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap with a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
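# A short usage sketch (added illustration): build a small weighted graph and
# run the algorithm; `dist` holds the accumulated edge costs and `parent`
# encodes the resulting tree.
#
#   g = GraphUndirectedWeighted[str]()
#   g.add_edge("a", "b", 3)
#   g.add_edge("b", "c", 10)
#   g.add_edge("c", "a", 5)
#   dist, parent = prims_algo(g)
#   # e.g. parent["b"] == "a", because the a-b edge (weight 3) is preferred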
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy the weights of the old-structure ProphetNet checkpoint into the transformers model."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        old_attribute = ""
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
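# Example invocation (script name and paths are illustrative placeholders):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_large_16gb \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased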
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    """Return True if every parameter leaf of the two Flax models matches within 1e-4."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
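# For instance, a model trivially equals itself under this check (illustrative):
#   assert check_models_equal(model, model)
# while two independently initialized models will almost surely exceed the
# 1e-4 tolerance in at least one parameter leaf.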
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to ``target`` using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 3)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
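# A minimal usage sketch for the tool above (the `load_tool` entry point is
# part of the transformers agents API; treat the exact task id as an
# assumption):
#
#   from transformers import load_tool
#   image_qa = load_tool("image-question-answering")
#   print(image_qa(image=pil_image, question="What is in the picture?"))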
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map from TensorFlow variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to the input of a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
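# Worked example of the "SAME" padding rule above (illustrative numbers):
# with in_height = 7, stride_height = 2 and kernel_height = 3, we get
# 7 % 2 == 1, so pad_along_height = max(3 - 1, 0) = 2, split into
# pad_top = 1 and pad_bottom = 1 -- matching what TensorFlow's
# padding="SAME" produces for the original MobileNetV1 checkpoints.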
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
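# Minimal inference sketch for the classification model above (the checkpoint
# name comes from the docstring constants; image loading is illustrative):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])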
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
def merge_sort(collection: list) -> list:
    """
    Pure Python implementation of the merge sort algorithm.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
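# Quick non-interactive checks (added illustration):
#   assert merge_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
#   assert merge_sort([]) == []
# The algorithm is O(n log n) overall, although popping from the head of a
# Python list is O(n), so this variant is pedagogical rather than
# performance-oriented.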
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
super().setUp()
snake_case_ :List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case_ :List[str] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
snake_case_ :List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case_ :Dict = {"""unk_token""": """<unk>"""}
snake_case_ :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCAmelCase_ ( self: Optional[int] , **snake_case: Optional[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCAmelCase_ ( self: Dict , **snake_case: Optional[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCAmelCase_ ( self: str , snake_case: List[str] ) -> Optional[Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase_ ( self: int ) -> List[str]:
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def lowerCAmelCase_ ( self: str ) -> str:
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCAmelCase_ ( self: Optional[int] ) -> List[str]:
snake_case_ :int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
snake_case_ :Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ :str = tokenizer(UpperCAmelCase_ , max_length=len(UpperCAmelCase_ ) , padding=UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
snake_case_ :Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Test that special tokens are reset
@require_torch
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
snake_case_ :Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ :str = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , UpperCAmelCase_ )
self.assertIn("""attention_mask""" , UpperCAmelCase_ )
self.assertNotIn("""labels""" , UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" , UpperCAmelCase_ )
@require_torch
def lowerCAmelCase_ ( self: Optional[int] ) -> str:
snake_case_ :str = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ :Tuple = tokenizer(text_target=UpperCAmelCase_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ :Dict = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def lowerCAmelCase_ ( self: str ) -> Dict:
snake_case_ :List[str] = ["""A long paragraph for summarization."""]
snake_case_ :Tuple = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ :int = tokenizer(UpperCAmelCase_ , text_target=UpperCAmelCase_ , return_tensors="""pt""" )
snake_case_ :Dict = inputs["""input_ids"""]
snake_case_ :Union[str, Any] = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: str ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case_ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ :List[Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ :int = """A, <mask> AllenNLP sentence."""
snake_case_ :Dict = tokenizer_r.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
snake_case_ :List[str] = tokenizer_p.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case_ :str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case_ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length) -> list:
    """Pad (or truncate) each sequence to `sequence_length`, padding on the given side."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char: str) -> bool:
    """Check whether `char` is an ASCII or Unicode punctuation character."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
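# Sketch of how the collator is typically wired up (names are illustrative;
# the tokenizer must be a LUKE-style tokenizer whose output contains
# "entity_ids", which the sequence_length lookup above depends on):
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(list_of_feature_dicts)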
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
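

# Hypothetical sketch of a concrete test case built on the mixin above; the
# feature extractor class and its constructor kwargs are placeholders.
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#
#       def setUp(self):
#           self.feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}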
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
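

# Example invocation of the conversion script (paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin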
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including `num`."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
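

# Quick sanity checks for the sieve (values computed by hand):
#
#   assert prime_sieve(10) == [2, 3, 5, 7]
#   assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]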
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
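

# A minimal sketch of running the same pipeline outside the test suite
# (requires a CUDA GPU and network access; prompt and step count are
# illustrative):
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   ).to("cuda")
#   frames = pipe("Spiderman is surfing", num_inference_steps=25).frames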
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
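

# The two-input gate composes into an n-input OR; a small sketch:
#
#   from functools import reduce
#
#   def or_gate_n(*inputs: int) -> int:
#       return reduce(or_gate, inputs, 0)
#
#   assert or_gate_n(0, 0, 1) == 1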
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The first available non-padding position index is padding_idx + 1.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
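

# A minimal sketch of using the same checkpoint outside the test suite
# (requires network access; the protein sequence is a placeholder):
#
#   from transformers import AutoTokenizer, EsmForMaskedLM
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   inputs = tokenizer("MKTAYIAKQR<mask>ISFVKSHFSRQ", return_tensors="pt")
#   logits = model(**inputs).logits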
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
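

# End-to-end usage sketch for the x4 upscaler (requires a GPU and network
# access; the input image path is a placeholder):
#
#   import torch
#   from diffusers import StableDiffusionUpscalePipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionUpscalePipeline.from_pretrained(
#       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#   ).to("cuda")
#   low_res = load_image("/path/to/low_res.png")  # small RGB image
#   upscaled = pipe(prompt="a cat", image=low_res).images[0]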
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test (5 random witnesses)."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Quick primality check: trial division by primes below 1000, then Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
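

# Sanity-check sketch: a freshly generated prime should pass both tests
# (key size reduced so the example runs quickly):
#
#   p = generate_large_prime(128)
#   assert is_prime_low_num(p) and rabin_miller(p)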
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
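

# Worked examples: a pair of negatives can flip the running minimum into the
# maximum, which is why both extremes are tracked above.
#
#   assert max_product_subarray([2, 3, -2, 4]) == 6        # subarray [2, 3]
#   assert max_product_subarray([-2, 0, -1]) == 0
#   assert max_product_subarray([2, -5, -2, -4, 3]) == 24  # subarray [-2, -4, 3]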
def get_set_bits_count(number: int) -> int:
    """Count the set bits of `number` with Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
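

# The n & (n - 1) step clears the lowest set bit each iteration; a quick
# cross-check against Python's built-in bit counting:
#
#   for n in (0, 1, 25, 37, 255):
#       assert get_set_bits_count(n) == bin(n).count("1")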
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1), )

        step_a = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=UP, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
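

# To render the scene above with the manim CLI (assuming this file is saved
# as stage_1.py; flags: preview, quality low):
#
#   manim -pql stage_1.py Stage1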
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
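

# With the lazy module in place, imports stay cheap until a torch-backed
# symbol is actually touched; an illustrative (not exhaustive) sketch:
#
#   from transformers.models.altclip import AltCLIPConfig  # config only, cheap
#   from transformers.models.altclip import AltCLIPModel   # triggers modeling import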
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
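

# Example usage (both indices are inclusive):
#
#   assert find_max([1, 5, 3, 9, 2], 0, 4) == 9
#   assert find_max([1, 5, 3, 9, 2], 0, 2) == 5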
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
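# A self-contained sketch of the prefix/suffix wrapping that
# set_src_lang_special_tokens configures above. The token ids are hypothetical
# placeholders; real ids come from the tokenizer's vocabulary.
def wrap_with_lang_tokens(token_ids, lang_code_id, eos_id, legacy_behaviour):
    if legacy_behaviour:
        # legacy: no prefix, language code appended after </s>
        prefix, suffix = [], [eos_id, lang_code_id]
    else:
        # current: language code first, plain </s> at the end
        prefix, suffix = [lang_code_id], [eos_id]
    return prefix + token_ids + suffix

assert wrap_with_lang_tokens([10, 11], lang_code_id=999, eos_id=2, legacy_behaviour=False) == [999, 10, 11, 2]
assert wrap_with_lang_tokens([10, 11], lang_code_id=999, eos_id=2, legacy_behaviour=True) == [10, 11, 2, 999]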
| 306
| 0
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = ["audio_values", "audio_mask"]
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any=2_048 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : Optional[int]=[16, 16] , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : List[str]=44_100 , __SCREAMING_SNAKE_CASE : Any=86 , __SCREAMING_SNAKE_CASE : List[str]=2_048 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__(
feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , **UpperCAmelCase_ , )
__SCREAMING_SNAKE_CASE = spectrogram_length
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1]
__SCREAMING_SNAKE_CASE = n_fft
__SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase_ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=UpperCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , ).T
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : np.array ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = spectrogram(
UpperCAmelCase_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
__SCREAMING_SNAKE_CASE = log_spec[:, :-1]
__SCREAMING_SNAKE_CASE = log_spec - 20.0
__SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__SCREAMING_SNAKE_CASE = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(UpperCAmelCase_ , dtype=np.floataa )
elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__SCREAMING_SNAKE_CASE = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__SCREAMING_SNAKE_CASE = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__SCREAMING_SNAKE_CASE = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__SCREAMING_SNAKE_CASE = np.array(UpperCAmelCase_ ).astype(np.floataa )
# convert into correct format for padding
__SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__SCREAMING_SNAKE_CASE = np.ones([len(UpperCAmelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase_ ) ):
__SCREAMING_SNAKE_CASE = audio_features[i]
__SCREAMING_SNAKE_CASE = feature
# return as BatchFeature
if return_attention_mask:
__SCREAMING_SNAKE_CASE = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
__SCREAMING_SNAKE_CASE = {"""audio_values""": padded_audio_features}
__SCREAMING_SNAKE_CASE = BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
return encoded_inputs
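# A small numpy sketch of the patch-based audio mask built in __call__ above,
# assuming the same layout: ceil(time_frames / time_patch) * freq_len real
# patches per example, padded with zeros up to the batch maximum. The sizes
# below are illustrative.
import numpy as np
from math import ceil

def audio_patch_mask(time_frames_per_example, time_patch, freq_len):
    patch_counts = [ceil(t / time_patch) * freq_len for t in time_frames_per_example]
    max_patches = max(patch_counts)
    return np.array(
        [[1] * n + [0] * (max_patches - n) for n in patch_counts], dtype=np.float32
    )

mask = audio_patch_mask([32, 16], time_patch=16, freq_len=8)
assert mask.shape == (2, 16) and mask[1].sum() == 8  # second clip pads half its row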
| 267
|
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = y_points[i]
for i in range(2 ,snake_case__ ):
for j in range(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
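# A runnable reference for the interpolation above, written with plain names so
# the recurrence is easy to check by hand. This zero-based variant starts the
# Neville table at column 0 instead of column 1; the recurrence is the same.
def neville(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = float(y_points[i])
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

# Three points on f(x) = x**2 recover it exactly at x0 = 2.5:
assert neville([1, 2, 3], [1, 4, 9], 2.5) == 6.25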
| 306
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Optional[int] = logging.get_logger(__name__)
A: Tuple = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
__lowerCAmelCase : str = "vivit"
def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=[2, 16, 16] , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu_fast" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-06 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
UpperCAmelCase : str = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : str = initializer_range
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = image_size
UpperCAmelCase : Dict = num_frames
UpperCAmelCase : List[Any] = tubelet_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Optional[Any] = qkv_bias
super().__init__(**UpperCAmelCase_ )
| 109
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int
__snake_case : int
__snake_case : float
__snake_case : float
__snake_case : Tuple[int]
def UpperCamelCase ( self: str ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = rays.view(UpperCAmelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(UpperCAmelCase_ , 1 , 3 )
+ self.x.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCAmelCase_ , *UpperCAmelCase_ , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase ( snake_case__ ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_SCREAMING_SNAKE_CASE = np.array([np.sin(snake_case__ ), np.cos(snake_case__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE = -z * 4
_SCREAMING_SNAKE_CASE = np.array([np.cos(snake_case__ ), -np.sin(snake_case__ ), 0.0] )
_SCREAMING_SNAKE_CASE = np.cross(snake_case__ ,snake_case__ )
origins.append(snake_case__ )
xs.append(snake_case__ )
ys.append(snake_case__ )
zs.append(snake_case__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,width=snake_case__ ,height=snake_case__ ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(snake_case__ )) ,)
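# A quick numpy check of the camera-basis construction in the loop above, under
# the same conventions: z points from the camera toward the origin, x is the
# horizontal axis, and y = cross(z, x). All three should be unit length and
# mutually orthogonal (up to float error).
import numpy as np

theta = 0.7
z = np.array([np.sin(theta), np.cos(theta), -0.5])
z /= np.sqrt(np.sum(z**2))
origin = -z * 4  # camera sits on a circle of radius 4, looking inward
x = np.array([np.cos(theta), -np.sin(theta), 0.0])
y = np.cross(z, x)

assert np.isclose(np.linalg.norm(z), 1.0)
assert np.isclose(np.linalg.norm(x), 1.0)
assert np.isclose(np.dot(x, z), 0.0)       # orthogonal by construction
assert np.isclose(np.linalg.norm(y), 1.0)  # hence the cross product is unit too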
| 306
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
_A : List[str] = "donut-swin"
_A : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: List[Any] , snake_case: List[str]=224 , snake_case: List[Any]=4 , snake_case: int=3 , snake_case: Any=96 , snake_case: Optional[Any]=[2, 2, 6, 2] , snake_case: Dict=[3, 6, 12, 24] , snake_case: List[Any]=7 , snake_case: List[str]=4.0 , snake_case: Optional[Any]=True , snake_case: List[str]=0.0 , snake_case: Dict=0.0 , snake_case: str=0.1 , snake_case: List[str]="gelu" , snake_case: Tuple=False , snake_case: Union[str, Any]=0.0_2 , snake_case: List[str]=1E-5 , **snake_case: List[Any] , ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase_ )
snake_case_ :Any = image_size
snake_case_ :List[Any] = patch_size
snake_case_ :Any = num_channels
snake_case_ :Dict = embed_dim
snake_case_ :Dict = depths
snake_case_ :str = len(UpperCAmelCase_ )
snake_case_ :Optional[int] = num_heads
snake_case_ :List[Any] = window_size
snake_case_ :str = mlp_ratio
snake_case_ :int = qkv_bias
snake_case_ :Dict = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Optional[int] = drop_path_rate
snake_case_ :List[str] = hidden_act
snake_case_ :List[Any] = use_absolute_embeddings
snake_case_ :Dict = layer_norm_eps
snake_case_ :Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ :int = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
| 66
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
| 306
| 0
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
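# A minimal sketch of the deprecation-shim pattern above, using only the
# standard library: re-export the moved symbol and warn once at import time.
# The paths and version below are illustrative, not the library's real API.
import warnings

def warn_deprecated_import(new_path: str, removal_version: str) -> None:
    warnings.warn(
        f"This import path is deprecated and will be removed in {removal_version}; "
        f"import from {new_path} instead.",
        FutureWarning,
        stacklevel=2,  # attribute the warning to the caller, not this helper
    )

warn_deprecated_import("diffusers.pipelines.pipeline_utils", "0.22.0")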
| 299
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __UpperCAmelCase :
def __init__( self: List[str] , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: int , UpperCAmelCase_: List[str] ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_SCREAMING_SNAKE_CASE = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(struct.unpack(""">16L""" , UpperCAmelCase_ ) ) + [0] * 64
for i in range(16 , 80 ):
_SCREAMING_SNAKE_CASE = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.padding()
_SCREAMING_SNAKE_CASE = self.split_blocks()
for block in self.blocks:
_SCREAMING_SNAKE_CASE = self.expand_block(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
_SCREAMING_SNAKE_CASE = (b & c) | ((~b) & d)
_SCREAMING_SNAKE_CASE = 0x5a_827_999
elif 20 <= i < 40:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0x6e_d9e_ba1
elif 40 <= i < 60:
_SCREAMING_SNAKE_CASE = (b & c) | (b & d) | (c & d)
_SCREAMING_SNAKE_CASE = 0x8f_1bb_cdc
elif 60 <= i < 80:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0xca_62c_1d6
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
self.rotate(UpperCAmelCase_ , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
a,
self.rotate(UpperCAmelCase_ , 30 ),
c,
d,
)
_SCREAMING_SNAKE_CASE = (
self.h[0] + a & 0xff_fff_fff,
self.h[1] + b & 0xff_fff_fff,
self.h[2] + c & 0xff_fff_fff,
self.h[3] + d & 0xff_fff_fff,
self.h[4] + e & 0xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = b"""Test String"""
assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = args.input_string
# In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
else:
_SCREAMING_SNAKE_CASE = bytes(snake_case__ ,"""utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
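# A hedged sanity check for the hand-rolled SHA-1 above: hashlib implements the
# same algorithm, so both digests must agree. The vector below is the standard
# SHA-1 test vector for b"abc".
import hashlib

def reference_sha1(data: bytes) -> str:
    return hashlib.sha1(data).hexdigest()  # noqa: S324 - checksum use only

assert reference_sha1(b"abc") == "a9993e364706816aba3e25717850c26c9cd0d89d"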
| 306
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
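# A minimal sketch of the lazy-import pattern that _LazyModule provides: the
# submodule is only imported when one of its symbols is first accessed. The
# names here are our own; this is an illustration, not transformers' exact code.
import importlib
import types

class LazyNamespace(types.ModuleType):
    def __init__(self, name: str, symbol_to_module: dict):
        super().__init__(name)
        self._symbol_to_module = symbol_to_module

    def __getattr__(self, name: str):
        # __getattr__ only fires when normal lookup fails, i.e. on first access
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        module = importlib.import_module(self._symbol_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups bypass this hook
        return value

lazy = LazyNamespace("demo", {"sqrt": "math", "OrderedDict": "collections"})
assert lazy.sqrt(9.0) == 3.0  # math is imported only at this point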
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 306
| 0
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
lowercase = img
lowercase = img.shape[1]
lowercase = img.shape[0]
lowercase = dst_width
lowercase = dst_height
lowercase = self.src_w / self.dst_w
lowercase = self.src_h / self.dst_h
lowercase = lowercase = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_5_5
)
def UpperCamelCase_ ( self ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowercase = self.img[self.get_y(UpperCAmelCase_ )][self.get_x(UpperCAmelCase_ )]
def UpperCamelCase_ ( self , _lowerCamelCase ):
return int(self.ratio_x * x )
def UpperCamelCase_ ( self , _lowerCamelCase ):
return int(self.ratio_y * y )
if __name__ == "__main__":
_UpperCamelCase, _UpperCamelCase : Optional[Any] = 8_0_0, 6_0_0
_UpperCamelCase : str = imread('image_data/lena.jpg', 1)
_UpperCamelCase : List[str] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
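# The same nearest-neighbour resampling without OpenCV, so it can be checked on
# a tiny synthetic image: each destination pixel copies the source pixel at
# int(ratio * index), exactly like get_x/get_y above.
import numpy as np

def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs]

tiny = np.array([[0, 255], [255, 0]], dtype=np.uint8)
print(resize_nearest(tiny, 4, 4))  # each source pixel becomes a 2x2 block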
| 220
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
| 306
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCamelCase = TypeVar("T")
class __lowercase (Generic[T] ):
def __init__( self , A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = data
__lowerCAmelCase : Any = None
def __str__( self ) ->Union[str, Any]:
'''simple docstring'''
return f"""{self.data}"""
class __lowercase (Generic[T] ):
def __init__( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = None
def __iter__( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.top
while node:
yield node.data
__lowerCAmelCase : Tuple = node.next
def __str__( self ) ->List[str]:
'''simple docstring'''
return "->".join([str(UpperCAmelCase_ ) for item in self] )
def __len__( self ) ->Optional[int]:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
return self.top is None
def UpperCamelCase__ ( self , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = Node(UpperCAmelCase_ )
if not self.is_empty():
__lowerCAmelCase : str = self.top
__lowerCAmelCase : str = node
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , UpperCAmelCase_ )
__lowerCAmelCase : List[str] = self.top
__lowerCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
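# A usage sketch for the linked-list stack above, rewritten with plain names so
# it runs standalone; the behavior (LIFO push/pop) is the same.
class _Node:
    def __init__(self, data, next_node=None):
        self.data, self.next = data, next_node

class LinkedStack:
    def __init__(self):
        self.top = None

    def push(self, item):
        self.top = _Node(item, self.top)  # new node becomes the top

    def pop(self):
        if self.top is None:
            raise IndexError("pop from empty stack")
        node, self.top = self.top, self.top.next
        return node.data

s = LinkedStack()
for n in (1, 2, 3):
    s.push(n)
assert [s.pop(), s.pop(), s.pop()] == [3, 2, 1]  # last in, first out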
| 275
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = gather(snake_case__ )
assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [state.process_index]
_SCREAMING_SNAKE_CASE = gather_object(snake_case__ )
assert len(snake_case__ ) == state.num_processes, F'{gathered_obj}, {len(snake_case__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = broadcast(snake_case__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if state.is_main_process:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes ).to(state.device )
_SCREAMING_SNAKE_CASE = pad_across_processes(snake_case__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""sum""" )
_SCREAMING_SNAKE_CASE = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""mean""" )
_SCREAMING_SNAKE_CASE = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> str:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(snake_case__ )
state.print("""testing gather_object""" )
test_gather_object(snake_case__ )
state.print("""testing broadcast""" )
test_broadcast(snake_case__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case__ )
if __name__ == "__main__":
main()
| 306
| 0
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_A : Any =re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
_A : Dict =10
_A : Tuple =256
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[MinHash]:
if len(snake_case__ ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ : Any = MinHash(num_perm=snake_case__ )
for token in set(snake_case__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Set[str]:
return {t for t in NON_ALPHA.split(snake_case__ ) if len(t.strip() ) > 0}
class _lowercase :
def __init__( self: Optional[Any] , *,
UpperCamelCase__: float = 0.85 , ):
lowerCamelCase__ : Dict = duplication_jaccard_threshold
lowerCamelCase__ : Union[str, Any] = NUM_PERM
lowerCamelCase__ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ : int = defaultdict(UpperCAmelCase_ )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: MinHash ):
lowerCamelCase__ : List[str] = self._index.query(UpperCAmelCase_ )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(UpperCAmelCase_ , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(UpperCAmelCase_ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(UpperCAmelCase_ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : List[str] = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ : List[Any] = [base] + list(UpperCAmelCase_ )
# reformat the cluster to be a list of dict
lowerCamelCase__ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(UpperCAmelCase_ )
return duplicate_clusters
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_duplicate_clusters()
with open(UpperCAmelCase_ , """w""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Dict = element
lowerCamelCase__ : int = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(snake_case__ , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCamelCase__ : List[str] = DuplicationIndex(duplication_jaccard_threshold=snake_case__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(snake_case__ ) ) , max_queue_size=100 ) ):
di.add(snake_case__ , snake_case__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> float:
lowerCamelCase__ : List[Any] = get_tokens(snake_case__ )
lowerCamelCase__ : Optional[int] = get_tokens(snake_case__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_A : str =None
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> str:
lowerCamelCase__ : List[Any] = []
for elementa in cluster:
lowerCamelCase__ : List[str] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
lowerCamelCase__ : Optional[Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(snake_case__ , snake_case__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ : Dict = 1
extremes.append(snake_case__ )
return extremes
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
global _shared_dataset
lowerCamelCase__ : Optional[int] = dataset
lowerCamelCase__ : str = []
lowerCamelCase__ : List[str] = partial(_find_cluster_extremes_shared , jaccard_threshold=snake_case__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
snake_case__ , snake_case__ , ) , total=len(snake_case__ ) , ):
extremes_list.append(snake_case__ )
return extremes_list
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
lowerCamelCase__ : Tuple = make_duplicate_clusters(snake_case__ , snake_case__ )
lowerCamelCase__ : Any = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : List[str] = find_extremes(snake_case__ , snake_case__ , snake_case__ )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ : Any = element
lowerCamelCase__ : Optional[Any] = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ : int = dataset.filter(lambda UpperCamelCase , UpperCamelCase : idx not in remove_indices , with_indices=snake_case__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ : Optional[Any] = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'''Original dataset size: {len(snake_case__ )}''' )
print(f'''Number of duplicate clusters: {len(snake_case__ )}''' )
print(f'''Files in duplicate cluster: {len(snake_case__ )}''' )
print(f'''Unique files in duplicate cluster: {len(snake_case__ )}''' )
print(f'''Filtered dataset size: {len(snake_case__ )}''' )
return ds_filter, duplicate_clusters
| 41
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __lowerCamelCase ( ) -> tuple[list[int], int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [randint(-10_00 ,10_00 ) for i in range(10 )]
_SCREAMING_SNAKE_CASE = randint(-50_00 ,50_00 )
return (arr, r)
UpperCamelCase = make_dataset()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(snake_case__ ,3 ):
if sum(snake_case__ ) == target:
return tuple(sorted(snake_case__ ) )
return (0, 0, 0)
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_SCREAMING_SNAKE_CASE = len(snake_case__ )
for i in range(n - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __lowerCamelCase ( ) -> tuple[float, float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum1(*dataset)
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum2(*dataset)
"""
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
return (min(snake_case__ ), min(snake_case__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 306
| 0
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase ):
def UpperCAmelCase_ ( self : List[Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'embed_dim' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'num_heads' ) )
class SCREAMING_SNAKE_CASE_ :
def __init__( self : str , _A : Tuple , _A : Union[str, Any]=13 , _A : List[str]=64 , _A : Any=3 , _A : Optional[Any]=[16, 48, 96] , _A : Union[str, Any]=[1, 3, 6] , _A : str=[1, 2, 10] , _A : int=[7, 3, 3] , _A : Optional[int]=[4, 2, 2] , _A : str=[2, 1, 1] , _A : List[str]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : Optional[int]=[0.0, 0.0, 0.0] , _A : List[Any]=0.0_2 , _A : Tuple=1E-12 , _A : Optional[Any]=True , _A : Optional[int]=True , _A : Optional[Any]=2 , ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Tuple = parent
snake_case_ : int = batch_size
snake_case_ : int = image_size
snake_case_ : Tuple = patch_sizes
snake_case_ : int = patch_stride
snake_case_ : Optional[int] = patch_padding
snake_case_ : Tuple = is_training
snake_case_ : Optional[Any] = use_labels
snake_case_ : Optional[Any] = num_labels
snake_case_ : List[str] = num_channels
snake_case_ : Dict = embed_dim
snake_case_ : str = num_heads
snake_case_ : Any = stride_kv
snake_case_ : Tuple = depth
snake_case_ : List[str] = cls_token
snake_case_ : Tuple = attention_drop_rate
snake_case_ : Dict = initializer_range
snake_case_ : List[str] = layer_norm_eps
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Union[str, Any] = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Optional[int] , _A : List[str] , _A : str , _A : int ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[Any] = CvtModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ : str = model(UpperCAmelCase_ )
snake_case_ : Dict = (self.image_size, self.image_size)
snake_case_ ,snake_case_ : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case_ : Dict = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case_ : List[str] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCAmelCase_ ( self : List[Any] , _A : str , _A : List[Any] , _A : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = self.num_labels
snake_case_ : Tuple = CvtForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = self.prepare_config_and_inputs()
snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__magic_name__: Dict = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__magic_name__: Dict = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
__magic_name__: Dict = False
__magic_name__: Optional[int] = False
__magic_name__: Optional[Any] = False
__magic_name__: Any = False
__magic_name__: Any = False
def UpperCAmelCase_ ( self : Any ) -> Any:
"""simple docstring"""
snake_case_ : Dict = CvtModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def UpperCAmelCase_ ( self : Dict ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : Dict ) -> str:
"""simple docstring"""
return
@unittest.skip(reason='Cvt does not output attentions' )
def UpperCAmelCase_ ( self : Any ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Union[str, Any] = model_class(UpperCAmelCase_ )
snake_case_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : int = [*signature.parameters.keys()]
snake_case_ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def UpperCAmelCase_ ( self : int ) -> Tuple:
"""simple docstring"""
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
def check_hidden_states_output(_A : Optional[int] , _A : List[Any] , _A : List[Any] ):
snake_case_ : str = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ : str = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ : Optional[Any] = outputs.hidden_states
snake_case_ : Dict = len(self.model_tester.depth )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[int] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Optional[int] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = CvtModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase_ ( self : Dict ) -> int:
"""simple docstring"""
snake_case_ : Tuple = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase_ )
snake_case_ : str = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ : Dict = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ : List[str] = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 327
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(UpperCAmelCase_ )
def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = '▁'
UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase_ = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
UpperCAmelCase_ = {
'facebook/mbart-large-50-one-to-many-mmt': 1_024,
}
# fmt: off
UpperCAmelCase_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class lowerCamelCase__( _UpperCAmelCase):
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Dict = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="</s>" , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: List[str]="<unk>" , UpperCamelCase_: str="<pad>" , UpperCamelCase_: Union[str, Any]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: str , ):
__lowerCamelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCamelCase = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
__lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowerCamelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCamelCase = 1
__lowerCamelCase = len(self.sp_model )
__lowerCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_ )
}
__lowerCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
__lowerCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowerCamelCase = src_lang if src_lang is not None else """en_XX"""
__lowerCamelCase = self.lang_code_to_id[self._src_lang]
__lowerCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase__ ( self: int ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase__ ( self: List[str] ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str ):
__lowerCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: int ):
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self: str , UpperCamelCase_: Dict ):
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str ):
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str ):
__lowerCamelCase = []
__lowerCamelCase = """"""
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
__lowerCamelCase = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
__lowerCamelCase = [1] * len(self.prefix_tokens )
__lowerCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] , UpperCamelCase_: Optional[str] , **UpperCamelCase_: List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__lowerCamelCase = src_lang
__lowerCamelCase = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
__lowerCamelCase = self.convert_tokens_to_ids(UpperCAmelCase_ )
__lowerCamelCase = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: str = "en_XX" , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "ro_RO" , **UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = src_lang
__lowerCamelCase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self: List[str] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.lang_code_to_id[src_lang]
__lowerCamelCase = [self.cur_lang_code_id]
__lowerCamelCase = [self.eos_token_id]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str ):
__lowerCamelCase = self.lang_code_to_id[tgt_lang]
__lowerCamelCase = [self.cur_lang_code_id]
__lowerCamelCase = [self.eos_token_id]
| 12
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
else:
_SCREAMING_SNAKE_CASE = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
_SCREAMING_SNAKE_CASE = ["""key_proj""", """value_proj""", """query_proj"""]
_SCREAMING_SNAKE_CASE = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if attributes[0] == "lm_head":
_SCREAMING_SNAKE_CASE = prophet
_SCREAMING_SNAKE_CASE = prophet_old
else:
_SCREAMING_SNAKE_CASE = prophet.prophetnet
_SCREAMING_SNAKE_CASE = prophet_old.model
_SCREAMING_SNAKE_CASE = False
for attribute in attributes:
if attribute in mapping:
_SCREAMING_SNAKE_CASE = mapping[attribute]
if not hasattr(snake_case__ ,snake_case__ ) and len(snake_case__ ) > 0:
_SCREAMING_SNAKE_CASE = attribute
elif hasattr(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.weight
logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.bias
logger.info(F'{attribute} is initialized' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute in special_keys and hasattr(snake_case__ ,"""in_proj_weight""" ):
_SCREAMING_SNAKE_CASE = old_model.in_proj_weight.shape[0] // 3
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_SCREAMING_SNAKE_CASE = True
break
if attribute.isdigit():
_SCREAMING_SNAKE_CASE = model[int(snake_case__ )]
_SCREAMING_SNAKE_CASE = old_model[int(snake_case__ )]
else:
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if old_attribute == "":
_SCREAMING_SNAKE_CASE = old_model
else:
if not hasattr(snake_case__ ,snake_case__ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowerCAmelCase__ ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = jnp.floataa
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape
__SCREAMING_SNAKE_CASE = jax.image.resize(
UpperCAmelCase_ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
__SCREAMING_SNAKE_CASE = self.conv(UpperCAmelCase_ )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = jnp.floataa
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.conv(UpperCAmelCase_ )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = None
lowerCAmelCase__ = jnp.floataa
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = nn.Dense(UpperCAmelCase_ , dtype=self.dtype )
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
__SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
__SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]=True ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = self.norma(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = nn.swish(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = self.conva(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(UpperCAmelCase_ ) )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase_ , 1 ) , 1 )
__SCREAMING_SNAKE_CASE = hidden_states + temb
__SCREAMING_SNAKE_CASE = self.norma(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = nn.swish(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = self.dropout(UpperCAmelCase_ , UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE = self.conva(UpperCAmelCase_ )
if self.conv_shortcut is not None:
__SCREAMING_SNAKE_CASE = self.conv_shortcut(UpperCAmelCase_ )
return hidden_states + residual
| 267
|
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(snake_case__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_SCREAMING_SNAKE_CASE = i + 1
else:
_SCREAMING_SNAKE_CASE = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 306
| 0
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
@classmethod
def snake_case_ (cls ) -> Union[str, Any]:
UpperCamelCase = TOKEN
HfFolder.save_token(UpperCAmelCase_ )
@classmethod
def snake_case_ (cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def snake_case_ (self ) -> str:
UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained(F"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ , repo_id="test-config" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained(F"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def snake_case_ (self ) -> Dict:
UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase_ , repo_id="valid_org/test-config-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def snake_case_ (self ) -> Tuple:
CustomConfig.register_for_auto_class()
UpperCamelCase = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
UpperCamelCase = AutoConfig.from_pretrained(F"{USER}/test-dynamic-config" , trust_remote_code=UpperCAmelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCamelCase = c.n_embd + 1 # int
UpperCamelCase = c.resid_pdrop + 1.0 # float
UpperCamelCase = not c.scale_attn_weights # bool
UpperCamelCase = c.summary_type + "foo" # str
c.update_from_string(
F"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(UpperCAmelCase_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(UpperCAmelCase_ , c.summary_type , "mismatch for key: summary_type" )
def snake_case_ (self ) -> str:
UpperCamelCase = PretrainedConfig()
UpperCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
UpperCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase_ , UpperCAmelCase_ )]
if len(UpperCAmelCase_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F" {', '.join(UpperCAmelCase_ )}." )
def snake_case_ (self ) -> Optional[int]:
with self.assertRaises(UpperCAmelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(UpperCAmelCase_ )
def snake_case_ (self ) -> Any:
UpperCamelCase = mock.Mock()
UpperCamelCase = 5_00
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=UpperCAmelCase_ ) as mock_head:
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ (self ) -> Any:
UpperCamelCase = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def snake_case_ (self ) -> Any:
UpperCamelCase = AutoConfig.from_pretrained("bert-base-cased" )
UpperCamelCase = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase_ )
UpperCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCamelCase = ["config.42.0.0.json"]
UpperCamelCase = 7_68
configuration.save_pretrained(UpperCAmelCase_ )
shutil.move(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , os.path.join(UpperCAmelCase_ , "config.42.0.0.json" ) )
UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def snake_case_ (self ) -> int:
UpperCamelCase = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
UpperCamelCase = "v4.0.0"
UpperCamelCase , UpperCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase_ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
UpperCamelCase = "v3.0.0"
UpperCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 153
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCamelCase = logging.get_logger(__name__)
# General docstring
UpperCamelCase = '''MobileNetV1Config'''
# Base docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = [1, 1_024, 7, 7]
# Image classification docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = '''tabby, tabby cat'''
UpperCamelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = model.mobilenet_va
else:
_SCREAMING_SNAKE_CASE = model
_SCREAMING_SNAKE_CASE = """MobilenetV1/Conv2d_0/"""
_SCREAMING_SNAKE_CASE = backbone.conv_stem.convolution.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.bias
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_mean
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = i * 2
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index + 1]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
_SCREAMING_SNAKE_CASE = model.classifier.weight
_SCREAMING_SNAKE_CASE = model.classifier.bias
return tf_to_pt_map
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
_SCREAMING_SNAKE_CASE = tf.train.list_variables(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
_SCREAMING_SNAKE_CASE = tf.train.load_variable(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = array
# Build TF to PyTorch weights loading map
_SCREAMING_SNAKE_CASE = _build_tf_to_pytorch_map(snake_case__ ,snake_case__ ,snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
_SCREAMING_SNAKE_CASE = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
_SCREAMING_SNAKE_CASE = array.squeeze().transpose()
else:
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
_SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ ,snake_case__ )
tf_weights.pop(name + """/RMSProp""" ,snake_case__ )
tf_weights.pop(name + """/RMSProp_1""" ,snake_case__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" ,snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = features.shape[-2:]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.stride
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.kernel_size
if in_height % stride_height == 0:
_SCREAMING_SNAKE_CASE = max(kernel_height - stride_height ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
_SCREAMING_SNAKE_CASE = max(kernel_width - stride_width ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_width - (in_width % stride_width) ,0 )
_SCREAMING_SNAKE_CASE = pad_along_width // 2
_SCREAMING_SNAKE_CASE = pad_along_width - pad_left
_SCREAMING_SNAKE_CASE = pad_along_height // 2
_SCREAMING_SNAKE_CASE = pad_along_height - pad_top
_SCREAMING_SNAKE_CASE = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(snake_case__ ,snake_case__ ,"""constant""" ,0.0 )
class __UpperCAmelCase (nn.Module ):
def __init__( self: Optional[Any] , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: bool = False , UpperCAmelCase_: Optional[bool] = True , UpperCAmelCase_: Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE = config
if in_channels % groups != 0:
raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
_SCREAMING_SNAKE_CASE = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_SCREAMING_SNAKE_CASE = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="""zeros""" , )
if use_normalization:
_SCREAMING_SNAKE_CASE = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.99_97 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
else:
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_SCREAMING_SNAKE_CASE = apply_tf_padding(UpperCAmelCase_ , self.convolution )
_SCREAMING_SNAKE_CASE = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
_SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
_SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase_ )
return features
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = MobileNetVaConfig
__snake_case : Any = load_tf_weights_in_mobilenet_va
__snake_case : Any = "mobilenet_v1"
__snake_case : List[Any] = "pixel_values"
__snake_case : Any = False
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
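# Added usage sketch (illustrative, not part of the original file). The checkpoint
# id is an assumption -- any MobileNetV1 image-classification checkpoint works the
# same way:
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()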
| 306
| 0
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file: str):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        """Greedy longest-match-first WordPiece tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Try the longest substring first, shrinking from the right.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
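# Added example (not part of the original file): the greedy longest-match behaviour
# of WordpieceTokenizer above, with a made-up toy vocabulary.
#
#   wp = WordpieceTokenizer(vocab={"un": 0, "happy": 1, "ness": 2}, unk_token="<unk>")
#   wp.tokenize("unhappyness")  # -> ["un", "happy", "ness"]
#   wp.tokenize("xyz")          # -> ["<unk>", "<unk>", "<unk>"] (one per unmatched char)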
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ) -> None:
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation followed by WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping special and negative ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: List[int] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 109
|
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
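    # Added example (illustrative): merge_sort returns a new list and works for any
    # comparable items, e.g. strings.
    print(*merge_sort(["pear", "apple", "banana"]), sep=",")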
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two numbers in a sorted list that add up to target.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 18)
    [1, 2]
    >>> two_pointer([2, 7, 11, 15], 26)
    [2, 3]
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
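    # Added example (illustrative): an empty list is returned when no pair sums to
    # the target.
    print(F"""{two_pointer([2, 7, 11, 15], 100) = }""")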
| 66
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad nested sequences to `sequence_length`, on the left or the right."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    """Check whether `char` is a punctuation character."""
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation, even characters such as
    # "^", "$" and "`" that are not in the Unicode Punctuation class.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
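# Added examples (illustrative, not part of the original file):
#   is_punctuation("!")  -> True   (ASCII punctuation range)
#   is_punctuation("“")  -> True   (Unicode category starts with "P")
#   is_punctuation("a")  -> False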
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
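# Added usage sketch (illustrative, not part of the original file): the collator is
# meant to be handed to a DataLoader, as in the LUKE NER example scripts.
#
#   data_collator = DataCollatorForLukeTokenClassification(tokenizer, padding="longest")
#   train_dataloader = DataLoader(train_dataset, collate_fn=data_collator, batch_size=8)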
| 306
| 0
|
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers.

    >>> solution(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
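    # Added sanity check (illustrative): the closed form agrees with brute force
    # for a small n.
    assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))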
| 299
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
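# Added illustration (not part of the original file): set_param swaps tensors into
# an existing layer in place, e.g.
#
#   layer = nn.Linear(2, 2)
#   set_param(layer, torch.zeros(2, 2), torch.zeros(2))
#   assert torch.equal(layer.weight, torch.zeros(2, 2))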
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"

    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
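# Added usage sketch (illustrative paths): the script is driven entirely by the
# three required arguments defined above, e.g.
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl \
#       --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin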
| 306
| 0
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
def __init__( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str=13 , __UpperCAmelCase : Optional[int]=30 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[Any]=3 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Any=5 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Union[str, Any]=10 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]=2 , ) ->Any:
"""simple docstring"""
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = type_sequence_label_size
a = initializer_range
a = scope
a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a = (image_size // patch_size) ** 2
a = num_patches + 1
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
a = ViTModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
a = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int ) ->Optional[Any]:
"""simple docstring"""
a = ViTForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
a = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a = 1
a = ViTForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a = model(UpperCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
a = self.type_sequence_label_size
a = ViTForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
a = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a = 1
a = ViTForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
a = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(UpperCAmelCase_ )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
a = model(**UpperCAmelCase_ )
# verify the logits
a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
a = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(UpperCAmelCase_ )
a = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480 )
a = prepare_img()
a = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' )
a = inputs.pixel_values.to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
a = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ )
# verify the logits
a = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
a = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' )
a = inputs.pixel_values.to(UpperCAmelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
a = model(UpperCAmelCase_ )
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 306
| 0
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 220
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 30 )
_SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
_SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 306
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
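
# A minimal usage sketch (not part of the original module): once registered with
# transformers' `pipeline` factory, this class is driven like any other task.
# The checkpoint, image path and labels below are illustrative assumptions.
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   print(preds[0]["label"], round(preds[0]["score"], 3))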
| 275
|
import random
def rabin_miller(num: int) -> bool:
    """Run five rounds of the Miller-Rabin probabilistic primality test."""
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial-divide against the primes below 1000, then fall back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Draw random `keysize`-bit candidates until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
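
# A minimal sketch of the same helpers at a toy key size (assumes the functions
# above): a 16-bit prime is fast to generate and easy to verify by hand.
if __name__ == "__main__":
    small_prime = generate_large_prime(keysize=16)
    print(("16-bit prime:", small_prime, "passes check:", is_prime_low_num(small_prime)))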
| 306
| 0
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        """Converts the lyrics into a sequence of character tokens."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Lowercase, keep [a-z0-9.] and collapse everything else into '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Convert the raw (artist, genres, lyrics) triple into token-id tensors."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
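
# Hedged usage sketch (the checkpoint id and metadata below are illustrative,
# not taken from the original file): the tokenizer packs one (artist, genres,
# lyrics) triple into one input_ids tensor per prior.
#
#   tokenizer = JukeboxTokenizer.from_pretrained("ArthurZ/jukebox")
#   encoding = tokenizer(artist="Alan Jackson", genres="Country Rock", lyrics="old town road")
#   print(len(encoding["input_ids"]))  # one sequence per entry in tokenizer.version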
| 41
|
def get_set_bits_count(number: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's trick."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
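
# Two worked examples of Kernighan's trick above: 13 is 0b1101, so the loop
# clears three bits; 0 has no set bits and skips the loop entirely.
if __name__ == "__main__":
    assert get_set_bits_count(13) == 3
    assert get_set_bits_count(0) == 0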
| 306
| 0
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
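
# The suite above is normally driven through pytest; the module path below is an
# assumption about where this file lives in the repository layout.
#
#   RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py -rA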
| 327
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
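
# Sketch of what the lazy module buys (usage assumed, names taken from the
# import structure above): importing the package stays cheap, and the
# torch-backed classes only materialize on first attribute access.
#
#   from transformers.models.altclip import AltCLIPProcessor  # light import
#   from transformers.models.altclip import AltCLIPModel      # triggers modeling_altclip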
| 306
| 0
|
import string
def decrypt(message: str) -> None:
    """Print every possible Caesar-shift decryption of `message`."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
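
# Worked example: "HELLO" shifted forward by 3 encrypts to "KHOOR", so brute-forcing
# all 26 keys recovers the plaintext on the key #3 line of the printed table.
if __name__ == "__main__":
    decrypt("KHOOR")  # the line for key #3 reads "Decryption using Key #3: HELLO"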
| 12
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
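
# Hedged usage sketch (the checkpoint and sentence are illustrative): the fast
# NLLB tokenizer swaps language-code special tokens via `src_lang`/`tgt_lang`
# before feeding a seq2seq model.
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("How are you?", return_tensors="pt")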
| 306
| 0
|
def solution(pence: int = 200) -> int:
    """Count the order-independent ways to make `pence` pence from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
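
# A reduced instance of the same bottom-up recurrence, small enough to trace by
# hand (the helper name is illustrative): with coins {1, 2} there are exactly
# three order-independent ways to make 4 pence: 1+1+1+1, 1+1+2 and 2+2.
def ways_to_make(pence: int, coins: list) -> int:
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


assert ways_to_make(4, [1, 2]) == 3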
| 267
|
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    """
    Interpolate and evaluate a polynomial at `xa` with Neville's scheme.
    Returns the interpolated value and the full table of intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
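
# A quick check of the scheme above, mirroring the classic example for this
# algorithm: the points lie on the line y = x + 5, so evaluating at x = 5
# returns 10.0.
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0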
| 306
| 0
|
"""simple docstring"""
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
UpperCamelCase = F"Expected string as input, found {type(snake_case__ )}"
raise ValueError(snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
UpperCamelCase = F"Expected boolean as use_pascal parameter, found {type(snake_case__ )}"
raise ValueError(snake_case__ )
UpperCamelCase = input_str.split("_" )
UpperCamelCase = 0 if use_pascal else 1
UpperCamelCase = words[start_index:]
UpperCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCamelCase = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
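
# Two worked examples of the conversion implemented above.
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"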
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 306
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                metric = metrics[key]
                if isinstance(metric, torch.Tensor):
                    metric = metric.item()
                msg = f"{key}: {metric:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
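
# Hedged wiring sketch (the trainer arguments are illustrative): the helpers
# above are meant to be handed to a pytorch_lightning Trainer alongside the
# fine-tuning module, e.g.
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir="output", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ]
#   )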
| 307
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 1
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b) -> bool:
    # Compare two initializers while ignoring their (necessarily different) names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the model to reduce its size.
    Writes a new file in the same directory as onnx_file_path and returns its path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
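
# Minimal usage sketch (the file path is an assumption): running the pass over
# an ONNX export writes an "optimized_<name>" copy next to it and returns its path.
#
#   optimized_path = remove_dup_initializers("exports/encoder.onnx")
#   print(optimized_path)  # exports/optimized_encoder.onnx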
| 307
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number to a string to iterate over its digits and sums their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
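# For example, digit_factorial_sum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145,
# so 145 maps to itself and forms a chain of length 1.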
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Return how many chains started below ``number_limit`` contain exactly ``chain_length`` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains exactly the desired number of elements, increase the counter.
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
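# Note: despite the name, this variant performs len(data) random transpositions rather
# than the classic Fisher-Yates walk (which swaps index i only with an index j <= i),
# so the resulting distribution over permutations is close to, but not exactly, uniform.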
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
def solution() -> int:
    """Return the product a * b * c of the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
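# This is Project Euler problem 9: since a + b + c = 1000, c is eliminated as
# 1000 - a - b, and the Pythagorean condition a^2 + b^2 = c^2 selects the unique triplet.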
if __name__ == "__main__":
    print(f"{solution() = }")
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 is the highest); FIFO within each level."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Queues are scanned in priority order, so level 0 is always served first.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.", ):
            AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test the config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below ``n``."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
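# A minimal sanity check (Project Euler #1): for n = 10 the multiples of 3 or 5 below 10
# are 3, 5, 6 and 9, so solution(10) == 23. The same result could also be computed in
# O(1) by inclusion-exclusion over the arithmetic series of 3, 5 and 15.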
if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
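# This is the setup of Project Euler problem 13: num.txt holds one large integer per
# line, and only the first ten digits of their total are kept.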
if __name__ == "__main__":
    print(solution())
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn datasets of tuples(story, 1st continuation, 2nd continuation, label) into tensors of model inputs."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
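# Shape notes: every story yields two candidate sequences (one per continuation), hence
# the (n_batch, 2, input_len) layout. mc_token_ids stores the index of the [clf] token
# in each candidate, which is where the double-heads model reads its multiple-choice
# logits, and lm_labels defaults to -100 so padding positions are ignored by the LM loss.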
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps", default=-1, type=int, help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ), )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
    # Hide the terminal cursor for the duration of the block and always restore it.
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
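# Example usage (the `hidden_cursor` name above is a reconstruction; the original
# helper's name was obfuscated in this source):
#
#     with hidden_cursor():
#         run_long_task()  # the cursor is restored even if this raises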
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            # Extend the mask so it also covers the dummy past key/value positions.
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
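# Worked example: for the 7B model the hidden size n is 4096, so 8 * 4096 / 3 = 10922.67,
# int(...) gives 10922, and rounding up to the next multiple of 256 yields 11008 -- the
# value listed for "7B" in INTERMEDIATE_SIZE_MAP above.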
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
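    # Why permute: the original LLaMA checkpoints store each attention head's rotary
    # (real, imaginary) components interleaved, while the Transformers implementation
    # expects them split into two contiguous halves per head. The view/transpose/reshape
    # above reorders the projection rows accordingly without changing any values.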
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim))
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0)
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0)
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer", )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
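# --- Illustrative sketch (standalone demo, not part of the test suite above) ---
# The two-stage pipeline these tests exercise: BasicTokenizer splits on
# whitespace/punctuation (optionally lowercasing and stripping accents), then
# WordpieceTokenizer greedily matches sub-words against a vocabulary. The tiny
# vocab below is made up for the demo and is not a real checkpoint vocabulary.
def _demo_two_stage_bert_tokenization():
    from transformers.models.bert.tokenization_bert import BasicTokenizer, WordpieceTokenizer

    basic = BasicTokenizer(do_lower_case=True)
    vocab = {tok: i for i, tok in enumerate(["[UNK]", ",", "un", "##want", "##ed", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

    words = basic.tokenize("UNwant\u00E9d,running")  # ['unwanted', ',', 'running']
    print([piece for word in words for piece in wordpiece.tokenize(word)])
    # ['un', '##want', '##ed', ',', 'runn', '##ing']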
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
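# --- Illustrative sketch (assumes an interactive TTY; demo only) ---
# A minimal read loop over get_character(): arrow keys come back as single
# characters with ARROW_KEY_FLAG folded into their code point, while unknown
# input comes back as the KEYMAP["undefined"] integer.
def _demo_read_keys():
    print("Press arrow keys to test, 'q' to quit.")
    arrows = {KEYMAP["up"]: "up", KEYMAP["down"]: "down", KEYMAP["right"]: "right", KEYMAP["left"]: "left"}
    while True:
        key = get_character()
        if not isinstance(key, str):
            print("undefined key")
            continue
        if key == "q" or ord(key) == KEYMAP["interrupt"]:
            break
        print(arrows.get(ord(key), repr(key)))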
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
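# --- Illustrative sketch (assumes network access to the "ZinengTang/tvlt-base"
# hub repo; demo only) ---
# How the processor is used outside the tests: one call routes `audio` to the
# feature extractor and `images` to the image processor and merges the outputs.
def _demo_tvlt_processor():
    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    audio = np.ones([12000])
    images = np.ones([3, 224, 224])
    inputs = processor(audio=audio, images=images)
    print(sorted(inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values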
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
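# --- Illustrative sketch (made-up small values; demo only) ---
# attribute_map means the generic config names resolve to the GPTSAN-specific
# fields, so downstream code can read `hidden_size` and `num_hidden_layers`.
def _demo_gptsan_config():
    config = GPTSanJapaneseConfig(d_model=256, num_switch_layers=2, num_heads=4)
    print(config.hidden_size)  # 256, aliased to d_model
    print(config.num_hidden_layers)  # 2, aliased to num_layers (switch + ext)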
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
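# --- Illustrative sketch (network access needed on first call; the dataset
# name is just an example) ---
# The typical entry point re-exported by this __init__: load a split and
# inspect one record.
def _demo_load_dataset():
    ds = load_dataset("rotten_tomatoes", split="train")
    print(ds[0])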
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate the beta factor v / c of the given velocity."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None) -> np.ndarray:
    """Apply the boost to an event four-vector (symbolic if none is given)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
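# --- Illustrative sketch (demo only) ---
# gamma grows slowly until v approaches c: a quick numeric check of beta()
# and gamma() for a few velocities.
def _demo_lorentz_factors():
    for v in (1.0, 0.5 * c, 0.9 * c, 0.99 * c):
        print(f"v = {v:.3e} m/s  beta = {beta(v):.6f}  gamma = {gamma(v):.6f}")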
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
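# --- Illustrative sketch (assumes network access to the
# "yjernite/retribert-base-uncased" hub repo; demo only) ---
# The overrides above define the BERT-style layout [CLS] A [SEP] (B [SEP]) and
# the corresponding token-type ids:
def _demo_retribert_special_tokens():
    tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    a = tok.encode("first segment", add_special_tokens=False)
    b = tok.encode("second segment", add_special_tokens=False)
    pair = tok.build_inputs_with_special_tokens(a, b)
    print(pair[0] == tok.cls_token_id, pair[-1] == tok.sep_token_id)  # True True
    print(tok.create_token_type_ids_from_sequences(a, b))  # 0s for segment A, 1s for B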
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        # one logits tensor per decoding head: character, BPE and wordpiece
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        # keep, per sample, the head with the highest confidence score
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
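# --- Illustrative sketch (plain-Python mock, no model weights; values made up) ---
# The per-sample selection rule inside batch_decode() in isolation: one
# candidate string and one cumulative-softmax confidence per decoding head,
# and the head with the top score wins.
def _demo_pick_best_head():
    scores = [0.91, 0.87, 0.95]  # char, bpe, wp confidences for one sample
    strs = ["ticket", "ticket", "t1cket"]
    max_score_index = scores.index(max(scores))
    print(strs[max_score_index], scores[max_score_index])  # t1cket 0.95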
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
                return deprecated_max_model_length

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
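# --- Illustrative sketch (downloads the "t5-small" sentencepiece model; demo only) ---
# extra_ids=100 appends sentinel tokens <extra_id_0>..<extra_id_99> at the top
# of the id space; _convert_token_to_id above maps <extra_id_N> to
# vocab_size - N - 1, and encode() appends </s> via _add_eos_if_not_present.
def _demo_t5_sentinels():
    tok = T5Tokenizer.from_pretrained("t5-small")
    ids = tok.encode("The <extra_id_0> walks in <extra_id_1> park")
    print(ids[-1] == tok.eos_token_id)  # True
    print(tok.convert_tokens_to_ids("<extra_id_0>"))  # 32099 == tok.vocab_size - 1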
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
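# --- Illustrative sketch (demo only) ---
# f(x) = x^2 on [0, 1] integrates to 1/3 exactly, so the estimate's error is
# easy to read off. Note that make_points() stops strictly before b - h, so
# one interior point is skipped and the result carries a small extra bias on
# top of the usual O(h^2) trapezoidal error.
def _demo_convergence():
    for steps in (10.0, 100.0, 1000.0):
        approx = method_1([0.0, 1.0], steps)
        print(steps, approx, abs(approx - 1.0 / 3.0))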
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the"
            " universe and 2) the passage of time and the length of objects can vary depending on the observer's"
            ' frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere,'
            ' is known as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
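# --- Illustrative sketch (tiny random CPU model; tolerance may need loosening
# on some hardware; demo only) ---
# The cache-equivalence idea behind create_and_check_decoder_model_past_large_inputs,
# in isolation: feeding a sequence in one pass and feeding it in two passes
# with past_key_values must produce the same final hidden states.
def _demo_kv_cache_equivalence():
    torch.manual_seed(0)
    config = LlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = LlamaModel(config).eval()
    ids = torch.randint(0, 99, (1, 8))
    with torch.no_grad():
        full = model(ids).last_hidden_state
        first = model(ids[:, :5], use_cache=True)
        second = model(ids[:, 5:], past_key_values=first.past_key_values).last_hidden_state
    print(torch.allclose(full[:, 5:], second, atol=1e-4))  # expected: True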
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add the three fractions x_num/x_den + y_num/y_den + z_num/z_den in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """
    Collect the distinct reduced sums x + y + z over all valid triples of
    fractions of the given order, and return numerator + denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
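# Worked example for add_three above: summing 1/2 + 1/3 + 1/6 gives
# top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and bottom = 2*3*6 = 36, so after dividing
# by gcd(36, 36) the reduced fraction is 1/1.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)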
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """
    Given that a beam strikes the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    with gradient incoming_gradient, return the next point of contact and the
    gradient of the outgoing beam.
    """
    # normal gradient = y/4x from the implicit derivative of 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) of the normal angle, via the gradient
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Count how many times the beam reflects off the interior of the ellipse
    before exiting through the small gap at the top (-0.01 <= x <= 0.01, y > 0).
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
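# Sanity check (a sketch using the functions defined above): every point that
# next_point returns must lie back on the ellipse 4x^2 + y^2 = 100, because it
# is obtained by intersecting the reflected ray with that ellipse.
x, y, gradient = next_point(1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4))
assert isclose(4 * x**2 + y**2, 100.0, rel_tol=1e-6)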
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"
def __init__( self: Union[str, Any] , UpperCamelCase: Optional[Any]=3_05_22 , UpperCamelCase: str=7_68 , UpperCamelCase: int=12 , UpperCamelCase: str=12 , UpperCamelCase: Union[str, Any]=30_72 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: int=0.1 , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: List[Any]=5_12 , UpperCamelCase: int=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: Dict=1e-12 , UpperCamelCase: List[str]=0 , UpperCamelCase: Optional[int]="absolute" , UpperCamelCase: List[str]=True , **UpperCamelCase: Dict , ) -> List[str]:
super().__init__(**UpperCamelCase )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = hidden_act
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = position_embedding_type
snake_case__ = use_cache
snake_case__ = pad_token_id
@classmethod
def lowerCAmelCase_ ( cls: Tuple , UpperCamelCase: Union[str, os.PathLike] , **UpperCamelCase: Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase )
snake_case__ , snake_case__ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
snake_case__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"
def __init__( self: Optional[Any] , UpperCamelCase: int = 3 , UpperCamelCase: int = 6_00 , UpperCamelCase: float = 2.0 , UpperCamelCase: float = 3.1 , UpperCamelCase: int = 8 , UpperCamelCase: List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase: List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , UpperCamelCase: List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , UpperCamelCase: List[int] = [] , UpperCamelCase: List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase: List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase: List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase: float = 0.25 , UpperCamelCase: str = "swish" , UpperCamelCase: int = 25_60 , UpperCamelCase: str = "mean" , UpperCamelCase: float = 0.02 , UpperCamelCase: float = 0.001 , UpperCamelCase: float = 0.99 , UpperCamelCase: float = 0.2 , **UpperCamelCase: Any , ) -> Optional[Any]:
super().__init__(**UpperCamelCase )
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = width_coefficient
snake_case__ = depth_coefficient
snake_case__ = depth_divisor
snake_case__ = kernel_sizes
snake_case__ = in_channels
snake_case__ = out_channels
snake_case__ = depthwise_padding
snake_case__ = strides
snake_case__ = num_block_repeats
snake_case__ = expand_ratios
snake_case__ = squeeze_expansion_ratio
snake_case__ = hidden_act
snake_case__ = hidden_dim
snake_case__ = pooling_type
snake_case__ = initializer_range
snake_case__ = batch_norm_eps
snake_case__ = batch_norm_momentum
snake_case__ = drop_connect_rate
snake_case__ = sum(UpperCamelCase ) * 4
@classmethod
def lowerCAmelCase_ ( cls: str , UpperCamelCase: Union[str, os.PathLike] , **UpperCamelCase: List[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase )
snake_case__ , snake_case__ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
snake_case__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True
def __init__( self: Optional[Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Any]=6_40 , UpperCamelCase: List[str]=1.0 , UpperCamelCase: int=0.02 , **UpperCamelCase: Tuple , ) -> Any:
super().__init__(**UpperCamelCase )
if text_config is None:
snake_case__ = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
snake_case__ = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
snake_case__ = AlignTextConfig(**UpperCamelCase )
snake_case__ = AlignVisionConfig(**UpperCamelCase )
snake_case__ = projection_dim
snake_case__ = temperature_init_value
snake_case__ = initializer_range
@classmethod
def lowerCAmelCase_ ( cls: Optional[int] , UpperCamelCase: AlignTextConfig , UpperCamelCase: AlignVisionConfig , **UpperCamelCase: Union[str, Any] ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.text_config.to_dict()
snake_case__ = self.vision_config.to_dict()
snake_case__ = self.__class__.model_type
return output
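# A sketch of how the composite config above is typically assembled, mirroring
# the upstream `transformers` ALIGN API (the classmethod name follows upstream
# usage and is partly obfuscated in this snippet, so treat this as illustrative):
#
#   text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
#   vision_config = AlignVisionConfig(num_channels=3, image_size=600)
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()  # serializes the nested text/vision configs plus model_type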
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
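# The formatter above is what backs the torch output mode of `datasets`. A
# minimal usage sketch through the public API (standard `datasets` calls):
from datasets import Dataset

ds = Dataset.from_dict({'x': [[1, 2], [3, 4]], 'y': [0.5, 1.5]}).with_format('torch')
row = ds[0]
# integers come back as int64 tensors, floats as float32 tensors
print(type(row['x']), row['x'].dtype, row['y'].dtype)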
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self: Tuple , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: str = "student_t" , UpperCamelCase: str = "nll" , UpperCamelCase: int = 1 , UpperCamelCase: List[int] = None , UpperCamelCase: Optional[Union[str, bool]] = "mean" , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: int = 64 , UpperCamelCase: int = 32 , UpperCamelCase: int = 32 , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , UpperCamelCase: bool = True , UpperCamelCase: str = "gelu" , UpperCamelCase: float = 0.05 , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: int = 1_00 , UpperCamelCase: float = 0.02 , UpperCamelCase: int=True , UpperCamelCase: str = "prob" , UpperCamelCase: int = 5 , UpperCamelCase: bool = True , **UpperCamelCase: Optional[int] , ) -> Union[str, Any]:
# time series specific configuration
snake_case__ = prediction_length
snake_case__ = context_length or prediction_length
snake_case__ = distribution_output
snake_case__ = loss
snake_case__ = input_size
snake_case__ = num_time_features
snake_case__ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case__ = scaling
snake_case__ = num_dynamic_real_features
snake_case__ = num_static_real_features
snake_case__ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
snake_case__ = cardinality
else:
snake_case__ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
snake_case__ = embedding_dimension
else:
snake_case__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case__ = num_parallel_samples
# Transformer architecture configuration
snake_case__ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case__ = d_model
snake_case__ = encoder_attention_heads
snake_case__ = decoder_attention_heads
snake_case__ = encoder_ffn_dim
snake_case__ = decoder_ffn_dim
snake_case__ = encoder_layers
snake_case__ = decoder_layers
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = encoder_layerdrop
snake_case__ = decoder_layerdrop
snake_case__ = activation_function
snake_case__ = init_std
snake_case__ = use_cache
# Informer
snake_case__ = attention_type
snake_case__ = sampling_factor
snake_case__ = distil
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
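# Sketch of the embedding-size default used above: when `embedding_dimension`
# is not given, each categorical feature of cardinality `cat` gets
# min(50, (cat + 1) // 2) dimensions (values below are for illustration only):
for cat in (3, 10, 1000):
    print(cat, min(50, (cat + 1) // 2))  # -> 2, 5, 50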
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Circular convolution of two signals using the rotation-matrix method.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Perform the circular convolution of first_signal and second_signal.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
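# Independent check of the class above via the DFT identity: the circular
# convolution of x and h equals IFFT(FFT(x) * FFT(h)). By hand, with
# x = [2, 1, 2, -1] and h = [1, 2, 3, 4], y[n] = sum_k x[k] * h[(n - k) mod 4]
# gives y = [10, 10, 6, 14].
import numpy as np

x = np.array([2, 1, 2, -1])
h = np.array([1, 2, 3, 4])
conv = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
print(np.round(conv, 2))  # -> [10. 10.  6. 14.]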
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def lowerCAmelCase_ ( self: List[str] ) -> int:
return 32
@property
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
return 32
@property
def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
return self.time_input_dim
@property
def lowerCAmelCase_ ( self: str ) -> Dict:
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self: str ) -> int:
return 1_00
@property
def lowerCAmelCase_ ( self: Any ) -> Dict:
torch.manual_seed(0 )
snake_case__ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ = UNet2DConditionModel(**UpperCamelCase )
return model
@property
def lowerCAmelCase_ ( self: List[Any] ) -> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self: Tuple ) -> List[Any]:
torch.manual_seed(0 )
snake_case__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
snake_case__ = self.dummy_unet
snake_case__ = self.dummy_movq
snake_case__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=UpperCamelCase , )
snake_case__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: int=0 ) -> Union[str, Any]:
snake_case__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
snake_case__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase )
if str(UpperCamelCase ).startswith('mps' ):
snake_case__ = torch.manual_seed(UpperCamelCase )
else:
snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
snake_case__ = 'cpu'
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**UpperCamelCase )
snake_case__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
snake_case__ = output.images
snake_case__ = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ = np.array(
[0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
def lowerCAmelCase_ ( self: Any ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
pipe_prior.to(UpperCamelCase )
        pipeline = KandinskyV22Pipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16 )
snake_case__ = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = 'red cat, 4k photo'
snake_case__ = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ , snake_case__ = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case__ = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ = pipeline(
image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
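# The integration test above exercises the two-stage Kandinsky 2.2 flow: a
# prior pipeline maps text to image embeddings, and a decoder pipeline turns
# those embeddings into pixels. Schematically (GPU-only sketch, same
# checkpoints as in the test):
#
#   image_embeds, negative_embeds = pipe_prior(prompt, generator=g).to_tuple()
#   image = pipeline(image_embeds=image_embeds,
#                    negative_image_embeds=negative_embeds,
#                    num_inference_steps=100, output_type='np').images[0]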
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """
    Create a beta schedule that discretizes the given alpha_bar function, which
    defines the cumulative product of (1 - beta) over time from t = 0 to 1.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str:
if trained_betas is not None:
snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.float32 )
elif beta_schedule == "linear":
snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
snake_case__ = 1.0 - self.betas
snake_case__ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase )
snake_case__ = use_karras_sigmas
def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str:
if schedule_timesteps is None:
snake_case__ = self.timesteps
snake_case__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0
else:
snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep
snake_case__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
snake_case__ = self.index_for_timestep(UpperCamelCase )
snake_case__ = self.sigmas[step_index]
snake_case__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str:
snake_case__ = num_inference_steps
snake_case__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
snake_case__ = np.log(UpperCamelCase )
snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase )
if self.config.use_karras_sigmas:
snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps )
snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] )
snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase )
snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
snake_case__ = torch.from_numpy(UpperCamelCase )
snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(UpperCamelCase ).startswith('mps' ):
# mps does not support float64
snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.float32 )
else:
snake_case__ = timesteps.to(device=UpperCamelCase )
# empty dt and derivative
snake_case__ = None
snake_case__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case__ = defaultdict(UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple:
# get log sigma
snake_case__ = np.log(UpperCamelCase )
# get distribution
snake_case__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
snake_case__ = low_idx + 1
snake_case__ = log_sigmas[low_idx]
snake_case__ = log_sigmas[high_idx]
# interpolate sigmas
snake_case__ = (low - log_sigma) / (low - high)
snake_case__ = np.clip(UpperCamelCase , 0 , 1 )
# transform interpolation to time range
snake_case__ = (1 - w) * low_idx + w * high_idx
snake_case__ = t.reshape(sigma.shape )
return t
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor:
snake_case__ = in_sigmas[-1].item()
snake_case__ = in_sigmas[0].item()
snake_case__ = 7.0 # 7.0 is the value used in the paper
snake_case__ = np.linspace(0 , 1 , UpperCamelCase )
snake_case__ = sigma_min ** (1 / rho)
snake_case__ = sigma_max ** (1 / rho)
snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
return self.dt is None
def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]:
snake_case__ = self.index_for_timestep(UpperCamelCase )
# advance index counter by 1
snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case__ = self.sigmas[step_index]
snake_case__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
snake_case__ = self.sigmas[step_index - 1]
snake_case__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case__ = 0
snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case__ = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case__ = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
snake_case__ = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
snake_case__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case__ = sigma_next - sigma_hat
# store for 2nd order step
snake_case__ = derivative
snake_case__ = dt
snake_case__ = sample
else:
# 2. 2nd order / Heun's method
snake_case__ = (sample - pred_original_sample) / sigma_next
snake_case__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
snake_case__ = self.dt
snake_case__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
snake_case__ = None
snake_case__ = None
snake_case__ = None
snake_case__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ):
# mps does not support float64
snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
snake_case__ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
snake_case__ = self.timesteps.to(original_samples.device )
snake_case__ = timesteps.to(original_samples.device )
snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps]
snake_case__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
snake_case__ = sigma.unsqueeze(-1 )
snake_case__ = original_samples + noise * sigma
return noisy_samples
def __len__( self: List[Any] ) -> Union[str, Any]:
return self.config.num_train_timesteps
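# An independent sketch of the Karras et al. (2022) sigma schedule implemented
# in the _convert_to_karras step above: interpolate between sigma_max and
# sigma_min in rho-space with rho = 7 (the endpoint values here are arbitrary).
import numpy as np

sigma_min, sigma_max, rho, steps = 0.1, 10.0, 7.0, 5
ramp = np.linspace(0, 1, steps)
sigmas = (sigma_max ** (1 / rho) + ramp * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho
print(sigmas)  # monotonically decreasing from sigma_max to sigma_min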
def circle_sort(collection: list) -> list:
    """
    Sort a list in place using the circle sort algorithm and return it.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
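# Quick usage check for circle_sort (small cases, verifiable by hand):
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []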
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase : Tuple = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
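# Lazy-module sketch: nothing under the package is imported until first
# attribute access, so a line like the one below is what actually triggers
# loading the `config` submodule (resolved via _LazyModule.__getattr__):
#
#   from transformers.onnx import OnnxConfig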
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"
def __init__( self: Tuple , UpperCamelCase: str=80_65 , UpperCamelCase: Optional[int]=15_36 , UpperCamelCase: Union[str, Any]=36 , UpperCamelCase: Optional[int]=61_44 , UpperCamelCase: Any=4 , UpperCamelCase: List[Any]=3_84 , UpperCamelCase: Optional[Any]=9_20 , UpperCamelCase: Any=1e-5 , UpperCamelCase: Optional[int]=0.3 , UpperCamelCase: str="relu" , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: str=0.3 , UpperCamelCase: Dict=0.3 , UpperCamelCase: str=1 , UpperCamelCase: Optional[Any]=0 , UpperCamelCase: Dict=2 , UpperCamelCase: Any=1 , UpperCamelCase: List[Any]=0.3 , UpperCamelCase: List[str]=1 , UpperCamelCase: List[Any]=(7,) , UpperCamelCase: Optional[int]=(3,) , UpperCamelCase: Union[str, Any]=80 , UpperCamelCase: List[Any]=1 , UpperCamelCase: Optional[Any]=None , UpperCamelCase: List[str]="sum" , UpperCamelCase: Dict=False , **UpperCamelCase: Dict , ) -> Optional[Any]:
super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = intermediate_size
snake_case__ = num_attention_heads
snake_case__ = attention_head_dim
snake_case__ = max_position_embeddings
snake_case__ = layer_norm_eps
snake_case__ = layerdrop
snake_case__ = hidden_act
snake_case__ = initializer_range
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = pad_token_id
snake_case__ = bos_token_id
snake_case__ = eos_token_id
snake_case__ = conv_glu_dim
snake_case__ = conv_dropout
snake_case__ = num_conv_layers
snake_case__ = input_feat_per_channel
snake_case__ = input_channels
snake_case__ = conv_channels
snake_case__ = ctc_loss_reduction
snake_case__ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case__ = list(UpperCamelCase )
snake_case__ = list(UpperCamelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
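# The validation at the end of __init__ ties the conv stack together: a sketch
# of the failing case (constructing the config here is illustrative only, and
# mirrors the upstream `transformers` MCTCTConfig):
#
#   MCTCTConfig(conv_kernel=(7, 7), num_conv_layers=1)
#   # -> ValueError: `len(config.conv_kernel)` == `config.num_conv_layers` ...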
def xnor_gate(input_1: int, input_2: int) -> int:
    """
    XNOR gate: output is 1 when both inputs are equal, otherwise 0.
    """
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively check the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))