| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
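
# A minimal usage sketch, not part of the original module: a quick sanity
# check of inverse_of_matrix on a 2x2 example whose inverse is known.
if __name__ == "__main__":
    # [[2, 5], [1, 3]] has determinant 2 * 3 - 1 * 5 = 1, so the inverse is
    # [[3, -5], [-1, 2]].
    print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))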
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names
        # gives identical results for the default config
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
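
# Usage sketch (an assumption about the surrounding package layout, not part
# of the original module): with the _LazyModule above in place, submodules are
# only imported on first attribute access, e.g.
#   from transformers import MCTCTConfig, MCTCTProcessor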
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
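
# Hypothetical invocation, assuming this script is saved as
# convert_upernet_checkpoint.py (network access is needed to fetch the
# mmsegmentation checkpoint and the ADE20k label file):
#   python convert_upernet_checkpoint.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny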
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
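
# Minimal usage sketch, not part of the original module (downloads tokenizer
# files from the Hugging Face Hub on first use):
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoding = tokenizer("Hello world")
#   print(encoding["input_ids"], encoding["attention_mask"])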
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files_failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files_failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files_failed:
                    files_failed[data[0]] = [data[1:]]
                else:
                    files_failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files_failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files_failed.append(files_failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_files_failed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence (A <-> T, C <-> G).

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0, (µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
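
# Usage sketch, not part of the original module (the checkpoint file name is
# hypothetical; `pt_model` must be a PyTorch model whose architecture matches
# the serialized Flax weights):
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")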
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(model_name )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(name , dict )
for k, v in name.items():
assert isinstance(v , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class a ( a__ ):
snake_case__ = '''gpt_neox'''
def __init__( self , _snake_case=5_04_32 , _snake_case=61_44 , _snake_case=44 , _snake_case=64 , _snake_case=2_45_76 , _snake_case="gelu" , _snake_case=0.25 , _snake_case=1_00_00 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=20_48 , _snake_case=0.02 , _snake_case=1E-5 , _snake_case=True , _snake_case=0 , _snake_case=2 , _snake_case=False , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = rotary_pct
lowerCAmelCase = rotary_emb_base
lowerCAmelCase = attention_dropout
lowerCAmelCase = hidden_dropout
lowerCAmelCase = classifier_dropout
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_cache
lowerCAmelCase = tie_word_embeddings
lowerCAmelCase = use_parallel_residual
lowerCAmelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'got {self.rope_scaling}' )
lowerCAmelCase = self.rope_scaling.get('type' , _snake_case )
lowerCAmelCase = self.rope_scaling.get('factor' , _snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
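# A minimal sketch of a `rope_scaling` value that passes the validation above
# (assumption: the values are illustrative, not taken from any released checkpoint):
#     rope_scaling = {"type": "linear", "factor": 2.0}
# Exactly two fields are required: a `type` in {"linear", "dynamic"} and a
# float `factor` strictly greater than 1.0.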
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(ch )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , ch )
if self.is_whitespace(ch ):
continue
normalized_text += ch
char_mapping.extend([i] * len(ch ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(token ) + offset
lowerCAmelCase = start + len(token )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
new_pieces.append(SPIECE_UNDERLINE )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(chunk ) or self.is_punct(chunk ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(chunk )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(piece ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
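# A quick sketch of the layout built above (assumption: illustrative ids, with
# cls_token_id == 1 and sep_token_id == 2):
#     single sequence: [1] + A + [2]
#     sequence pair:   [1] + A + [2, 2] + B + [2]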
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
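# A quick sketch of the token-type layout above (assumption: illustrative
# lengths, len(A) == 2 and len(B) == 3):
#     [CLS] A [SEP]               -> [0, 0, 0, 0]
#     [CLS] A [SEP] [SEP] B [SEP] -> [0, 0, 0] + [1, 1, 1, 1, 1, 1]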
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
"""simple docstring"""
__UpperCamelCase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_58_18,
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : float ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCAmelCase = (
F'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
F'Valid values are: {", ".join(ENERGY_CONVERSION )}'
)
raise ValueError(_UpperCAmelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
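# A minimal usage sketch (assumption: the values are illustrative). Conversion
# always goes through the joule base unit, so 1 kilowatthour in joules is
#     1 * ENERGY_CONVERSION["kilowatthour"] / ENERGY_CONVERSION["joule"]  # 3_600_000.0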
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
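# The conversion above is meant to rename the fine-tuned DialoGPT head weight
# from `lm_head.decoder.weight` to the `lm_head.weight` key that the
# transformers GPT-2 implementation expects, then save the state dict under
# WEIGHTS_NAME in the dump folder.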
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
lowerCAmelCase = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
lowerCAmelCase = tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase = output[output != -float('inf' )]
lowerCAmelCase = tf.cast(
tf.where(tf.not_equal(_snake_case , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-12 )
tf.debugging.assert_equal(_snake_case , _snake_case )
@require_tf
class a ( unittest.TestCase , a__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
snake_case__ = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase = 2
lowerCAmelCase = 2
class a ( tf.Module ):
def __init__( self , _snake_case ):
"""simple docstring"""
super(_snake_case , self ).__init__()
lowerCAmelCase = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase = [[2, 0], [1_02, 1_03]]
lowerCAmelCase = [[1, 0], [1, 1]]
lowerCAmelCase = DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase = tf.saved_model.load(tmp_dir ).signatures['serving_default']
for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
lowerCAmelCase = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase = serving_func(**_snake_case )['sequences']
lowerCAmelCase = test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase = 1
lowerCAmelCase = 2
class a ( tf.Module ):
def __init__( self , _snake_case ):
"""simple docstring"""
super(_snake_case , self ).__init__()
lowerCAmelCase = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase = [[2], [1_02, 1_03]]
lowerCAmelCase = [[1], [1, 1]]
lowerCAmelCase = DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase = tf.saved_model.load(tmp_dir ).signatures['serving_default']
for input_row in range(len(dummy_input_ids ) ):
lowerCAmelCase = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase = serving_func(**_snake_case )['sequences']
lowerCAmelCase = test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
@require_tensorflow_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=tmp_dir )
class a ( tf.keras.layers.Layer ):
def __init__( self ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(tmp_dir , 'spiece.model' ) , 'rb' ).read() )
lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def UpperCamelCase__ ( self , _snake_case , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = self.tokenizer.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = text.pad_model_inputs(
_snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowerCAmelCase = self.model.generate(input_ids=_snake_case , attention_mask=_snake_case )
return self.tokenizer.detokenize(_snake_case )
lowerCAmelCase = CompleteSentenceTransformer()
lowerCAmelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
lowerCAmelCase = complete_model(_snake_case )
lowerCAmelCase = tf.keras.Model(_snake_case , _snake_case )
keras_model.save(tmp_dir )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
lowerCAmelCase = 14
lowerCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase = 'Hello, my dog is cute and'
lowerCAmelCase = tokenizer(_snake_case , return_tensors='tf' )
lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase = [6_38, 1_98]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase = 'Hugging Face is a technology company based in New York and Paris.'
lowerCAmelCase = bart_tokenizer(_snake_case , return_tensors='tf' ).input_ids
lowerCAmelCase = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase = bart_model.generate(_snake_case ).numpy()
class a ( a__ ):
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
return super().call(_snake_case , **_snake_case )
lowerCAmelCase = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase = bart_model.generate(_snake_case , foo='bar' ).numpy()
self.assertTrue(np.array_equal(_snake_case , _snake_case ) )
class a ( bart_model.model.encoder.__class__ ):
def UpperCamelCase__ ( self , _snake_case , **_snake_case ):
"""simple docstring"""
return super().call(_snake_case , **_snake_case )
lowerCAmelCase = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCAmelCase = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase = bart_model.generate(_snake_case ).numpy()
with self.assertRaises(_snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_snake_case , foo='bar' )
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(vert )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
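# A minimal usage sketch on the two sample graphs defined at the top of this
# file (assumption: hypothetical names `test_graph_1` / `test_graph_2` for
# them). The last function implements Kosaraju's algorithm, so each inner list
# is one strongly connected component, e.g. (up to ordering):
#     [[0, 1, 2], [3], [4]]     for test_graph_1
#     [[0, 1, 2], [3, 4, 5]]    for test_graph_2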
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class a ( nn.Module ):
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = module
lowerCAmelCase = nn.Sequential(
nn.Linear(module.in_features , _snake_case , bias=_snake_case ) , nn.Linear(_snake_case , module.out_features , bias=_snake_case ) , )
lowerCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_snake_case )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCamelCase__ ( self , _snake_case , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.module(_snake_case , *_snake_case , **_snake_case ) + self.adapter(_snake_case )
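# A minimal sketch of the adapter above (assumption: illustrative shapes; the
# fine-tuning test further below wraps attention projections the same way,
# referring to the class as `LoRALayer`):
#     base = nn.Linear(768, 768)
#     wrapped = LoRALayer(base, 16)  # forward(x) == base(x) + adapter(x)
# The adapter is a rank-16 bottleneck whose second linear starts zero-initialised,
# so training begins from exactly the frozen base module's output.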
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b7 to test our module
snake_case__ = '''bigscience/bloom-1b7'''
# Constant values
snake_case__ = 2.1_09_65_95_52_69_25_74
snake_case__ = '''Hello my name is'''
snake_case__ = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
snake_case__ = 1_0
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_abit.config
self.assertTrue(hasattr(_snake_case , 'quantization_config' ) )
lowerCAmelCase = config.to_dict()
lowerCAmelCase = config.to_diff_dict()
lowerCAmelCase = config.to_json_string()
def UpperCamelCase__ ( self ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
lowerCAmelCase = self.model_fpaa.get_memory_footprint()
lowerCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_snake_case , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BitsAndBytesConfig()
lowerCAmelCase = True
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
lowerCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BitsAndBytesConfig()
with self.assertRaises(_snake_case ):
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_snake_case , load_in_abit=_snake_case , device_map='auto' , bnb_abit_quant_type='nf4' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_snake_case ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(_snake_case ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_snake_case ):
# Tries with a cast to float
self.model_abit.float()
with self.assertRaises(_snake_case ):
# Tries with a cast to half
self.model_abit.half()
# Test if we did not break anything
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
lowerCAmelCase = self.model_fpaa.to(torch.floataa )
lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
lowerCAmelCase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
lowerCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
lowerCAmelCase = self.model_fpaa.float()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_snake_case , device_map='auto' )
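# `wo` appears in T5's `_keep_in_fp32_modules`, so it is expected to stay in
# float32 even though the rest of the model was just loaded quantized.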
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
lowerCAmelCase = 't5-small'
lowerCAmelCase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
lowerCAmelCase = 'Translate in German: Hello, my dog is cute'
def UpperCamelCase__ ( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCAmelCase = None
# test with `t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
# test with `flan-t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
lowerCAmelCase = modules
def UpperCamelCase__ ( self ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
# test with `flan-t5-small`
lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_snake_case , device_map='auto' )
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
lowerCAmelCase = model.generate(**_snake_case )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# model_name
lowerCAmelCase = 'bigscience/bloom-560m'
lowerCAmelCase = 't5-small'
# Different types of model
lowerCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
# Sequence classification model
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_snake_case , device_map='auto' )
# CausalLM model
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' )
# Seq2seq model
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_snake_case , device_map='auto' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_snake_case , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
lowerCAmelCase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS )
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'facebook/opt-350m'
super().setUp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_snake_case ) ):
lowerCAmelCase = LoRALayer(module.q_proj , rank=16 )
lowerCAmelCase = LoRALayer(module.k_proj , rank=16 )
lowerCAmelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
lowerCAmelCase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCAmelCase = model.forward(**_snake_case )
out.logits.norm().backward()
for module in model.modules():
if isinstance(module , LoRALayer ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(module , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class a ( a__ ):
snake_case__ = '''gpt2-xl'''
snake_case__ = 3.31_91_85_48_54_15_21_87
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(mode , str ):
try:
lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(cached_features_file )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Dict = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(filename )[1] in (".py", ".ipynb"):
yield os.path.join(dir_path , filename ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(filepath )
if filepath != old_path:
lowerCAmelCase = print_path(old_path , filepath )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
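# A minimal example of the output (assumption: a hypothetical file
# "sorts/bubble_sort.py"): the first pass prints a "## Sorts" section header,
# then " * [Bubble Sort](sorts/bubble_sort.py)" as an indented bullet.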
if __name__ == "__main__":
print_directory_md('''.''')
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = StableDiffusionDiffEditPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
snake_case__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case__ = frozenset([] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
lowerCAmelCase = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_zero=_snake_case , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
lowerCAmelCase = CLIPTextModel(_snake_case )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 16, 16) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' )
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase = Image.fromarray(np.uint8(_snake_case ) ).convert('RGB' )
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_snake_case , _snake_case , _snake_case )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCAmelCase = self.get_dummy_inputs(_snake_case )
lowerCAmelCase = pipe(**_snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_snake_case )
lowerCAmelCase = self.pipeline_class.from_pretrained(_snake_case )
pipe_loaded.to(_snake_case )
pipe_loaded.set_progress_bar_config(disable=_snake_case )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_snake_case , _snake_case ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
lowerCAmelCase = self.get_dummy_inputs(_snake_case )
lowerCAmelCase = pipe_loaded(**_snake_case )[0]
lowerCAmelCase = np.abs(output - output_loaded ).max()
self.assertLess(_snake_case , 1E-4 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_mask_inputs(_snake_case )
lowerCAmelCase = pipe.generate_mask(**_snake_case )
lowerCAmelCase = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCAmelCase = np.array([0] * 9 )
lowerCAmelCase = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inversion_inputs(_snake_case )
lowerCAmelCase = pipe.invert(**_snake_case ).images
lowerCAmelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCAmelCase = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = {'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
lowerCAmelCase = DPMSolverMultistepScheduler(**_snake_case )
lowerCAmelCase = DPMSolverMultistepInverseScheduler(**_snake_case )
lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inversion_inputs(_snake_case )
lowerCAmelCase = pipe.invert(**_snake_case ).images
lowerCAmelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCAmelCase = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1E-3 )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
lowerCAmelCase = raw_image.convert('RGB' ).resize((7_68, 7_68) )
lowerCAmelCase = raw_image
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=_snake_case , torch_dtype=torch.float16 )
lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = 'a bowl of fruit'
lowerCAmelCase = 'a bowl of pears'
lowerCAmelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=_snake_case , target_prompt=_snake_case , generator=_snake_case , )
lowerCAmelCase = pipe.invert(
prompt=_snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=_snake_case ).latents
lowerCAmelCase = pipe(
prompt=_snake_case , mask_image=_snake_case , image_latents=_snake_case , generator=_snake_case , negative_prompt=_snake_case , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
lowerCAmelCase = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=_snake_case , torch_dtype=torch.float16 )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = 'a bowl of fruit'
lowerCAmelCase = 'a bowl of pears'
lowerCAmelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=_snake_case , target_prompt=_snake_case , generator=_snake_case , )
lowerCAmelCase = pipe.invert(
prompt=_snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=_snake_case , num_inference_steps=25 , ).latents
lowerCAmelCase = pipe(
prompt=_snake_case , mask_image=_snake_case , image_latents=_snake_case , generator=_snake_case , negative_prompt=_snake_case , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
lowerCAmelCase = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 360
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase = g.get_repo('huggingface/diffusers' )
lowerCAmelCase = repo.get_issues(state='open' )
for issue in open_issues:
        lowerCAmelCase = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=_UpperCAmelCase )
lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
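# Hedged usage note (assumption, not stated in the script): this is meant to run
# as a scheduled GitHub Actions job with a token exported in the environment,
# e.g. `GITHUB_TOKEN=<token> python stale.py` (the file name is hypothetical).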
| 309
| 0
|
"""simple docstring"""
import argparse
import os
import re
__UpperCamelCase : List[str] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
__UpperCamelCase : int = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
__UpperCamelCase : str = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__UpperCamelCase : Tuple = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
__UpperCamelCase : int = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__UpperCamelCase : Tuple = re.compile(R'''\[([^\]]+)\]''')
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
lowerCAmelCase = _re_indent.search(_UpperCAmelCase )
return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]="" , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None ):
lowerCAmelCase = 0
lowerCAmelCase = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_UpperCAmelCase ):
index += 1
lowerCAmelCase = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase = [lines[index]]
index += 1
while index < len(_UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(_UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_UpperCAmelCase ) )
if index < len(_UpperCAmelCase ) - 1:
lowerCAmelCase = [lines[index + 1]]
index += 1
else:
lowerCAmelCase = []
else:
blocks.append('\n'.join(_UpperCAmelCase ) )
lowerCAmelCase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_UpperCAmelCase ) > 0:
blocks.append('\n'.join(_UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_UpperCAmelCase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
def _inner(_UpperCAmelCase : Dict ):
return key(_UpperCAmelCase ).lower().replace('_' , '' )
return _inner
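# Illustration (assumption): the wrapper makes a sort key case- and
# underscore-insensitive, e.g. ignore_underscore(lambda s: s)('_LazyModule')
# evaluates to 'lazymodule'.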
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]=None ):
# If no key is provided, we use a noop.
    def noop(_UpperCAmelCase : List[str] ):
        return _UpperCAmelCase
if key is None:
lowerCAmelCase = noop
# Constants are all uppercase, they go first.
lowerCAmelCase = [obj for obj in objects if key(_UpperCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase = [obj for obj in objects if key(_UpperCAmelCase )[0].isupper() and not key(_UpperCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase = [obj for obj in objects if not key(_UpperCAmelCase )[0].isupper()]
lowerCAmelCase = ignore_underscore(_UpperCAmelCase )
return sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase )
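# Example ordering under the rules above (illustrative): constants first, then
# classes, then functions, each group sorted while ignoring case and underscores:
# sort_objects(['load_tool', 'CONSTANT_A', 'MyClass']) == ['CONSTANT_A', 'MyClass', 'load_tool']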
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
# This inner function sort imports between [ ].
    def _replace(match ):
        lowerCAmelCase = match.groups()[0]
if "," not in imports:
return F'[{imports}]'
lowerCAmelCase = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase = keys[:-1]
return "[" + ", ".join([F'"{k}"' for k in sort_objects(_UpperCAmelCase )] ) + "]"
lowerCAmelCase = import_statement.split('\n' )
if len(_UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase = 2 if lines[1].strip() == '[' else 1
        lowerCAmelCase = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCAmelCase = sort_objects(_UpperCAmelCase , key=lambda x : x[1] )
lowerCAmelCase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase = keys[:-1]
lowerCAmelCase = get_indent(lines[1] ) + ', '.join([F'"{k}"' for k in sort_objects(_UpperCAmelCase )] )
return "\n".join(_UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase = _re_bracket_content.sub(_replace , _UpperCAmelCase )
return import_statement
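# Illustration (assumption): a one-line entry such as
# '_import_structure["models"] = ["b", "a"]' comes back with its bracket
# contents sorted, i.e. '_import_structure["models"] = ["a", "b"]'.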
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=True ):
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase = split_code_in_indented_blocks(
_UpperCAmelCase , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_UpperCAmelCase ) - 1 ):
        # Check if the block contains some `_import_structure` entries to sort.
lowerCAmelCase = main_blocks[block_idx]
lowerCAmelCase = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase = 0
while line_idx < len(_UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase = len(_UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(_UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase = '\n'.join(block_lines[line_idx:-1] )
lowerCAmelCase = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase = split_code_in_indented_blocks(_UpperCAmelCase , indent_level=_UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        lowerCAmelCase = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase = [(i, key) for i, key in enumerate(_UpperCAmelCase ) if key is not None]
        lowerCAmelCase = [x[0] for x in sorted(_UpperCAmelCase , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase = 0
lowerCAmelCase = []
for i in range(len(_UpperCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_UpperCAmelCase ):
if check_only:
return True
else:
print(F'Overwriting {file}.' )
with open(_UpperCAmelCase , 'w' ) as f:
f.write('\n'.join(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict=True ):
lowerCAmelCase = []
for root, _, files in os.walk(_UpperCAmelCase ):
if "__init__.py" in files:
lowerCAmelCase = sort_imports(os.path.join(_UpperCAmelCase , '__init__.py' ) , check_only=_UpperCAmelCase )
if result:
lowerCAmelCase = [os.path.join(_UpperCAmelCase , '__init__.py' )]
if len(_UpperCAmelCase ) > 0:
raise ValueError(F'Would overwrite {len(_UpperCAmelCase )} files, run `make style`.' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__UpperCamelCase : Dict = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import numpy
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase = numpy.zeros(output_array.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'Iteration {iteration} Loss: {loss}' )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = input_arr
lowerCAmelCase = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : numpy.ndarray ):
return 1 / (1 + numpy.exp(-value ))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : numpy.ndarray ):
return (value) * (1 - (value))
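# Note (illustrative): the derivative is expressed in terms of the sigmoid
# *output* s, i.e. s * (1 - s), so sigmoid_derivative(0.5) == 0.25 at the
# steepest point of the curve.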
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
    lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
# Calling neural network class.
lowerCAmelCase = TwoHiddenLayerNeuralNetwork(
input_array=_UpperCAmelCase , output_array=_UpperCAmelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_UpperCAmelCase , iterations=10 , give_loss=_UpperCAmelCase )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 362
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 309
| 0
|
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ):
# Initialise PyTorch model
    lowerCAmelCase = T5Config.from_json_file(_UpperCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
    lowerCAmelCase = T5ForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
    load_tf_weights_in_t5(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 363
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
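# Illustrative behaviour (assumption about argument order): evaluating 'abc'
# against the target 'abd' matches two of three positions and returns ('abc', 2.0).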
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 )
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
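# Intent (hedged reading of the original algorithm): both parents are split at
# the same random index and their tails are swapped, producing two complementary
# children.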
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ):
lowerCAmelCase = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ):
lowerCAmelCase = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase = int(parent_a[1] * 100 ) + 1
lowerCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0]
lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
    # Verify that the target contains no genes besides the ones inside the genes variable.
lowerCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
    # Just some logs to know what the algorithm is doing.
lowerCAmelCase ,lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
        lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda x : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of the evolution.
lowerCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also compute small strings in
        # far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Union[str, Any] = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364
|
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = ''
lowerCAmelCase = ''
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 2_56
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
        lowerCAmelCase = cv2.imread(_snake_case , 0 )
lowerCAmelCase = copy.deepcopy(self.img )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
lowerCAmelCase = np.sum(_snake_case )
for i in range(len(_snake_case ) ):
lowerCAmelCase = x[i] / self.k
self.sk += prk
lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase = int(last % last )
lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_snake_case )
lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
        cv2.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ ( self ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        cv2.imshow('Output-Image' , self.img )
        cv2.imshow('Input-Image' , self.original_image )
        cv2.waitKey(50_00 )
        cv2.destroyAllWindows()
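# Hedged summary (assumption): the class performs classic histogram
# equalization -- the normalized histogram is accumulated into a CDF and each
# grey level is mapped to (L - 1) * CDF before the image is rewritten.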
if __name__ == "__main__":
    __UpperCamelCase : int = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 365
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 100 ):
lowerCAmelCase = (n * (n + 1) // 2) ** 2
lowerCAmelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
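# Worked check (illustrative, n = 10): (1 + ... + 10)**2 == 55**2 == 3025 and
# 1**2 + ... + 10**2 == 385, so solution(10) should return 3025 - 385 == 2640.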
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCAmelCase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
| 309
| 0
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ):
lowerCAmelCase = {}
lowerCAmelCase = job['started_at']
lowerCAmelCase = job['completed_at']
lowerCAmelCase = date_parser.parse(_UpperCAmelCase )
lowerCAmelCase = date_parser.parse(_UpperCAmelCase )
lowerCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowerCAmelCase = start
lowerCAmelCase = end
lowerCAmelCase = duration_in_min
return job_info
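# Illustrative example (assumption): a job that started at 08:00:00 and completed
# at 08:12:30 spans 750 seconds; round(750 / 60.0) == 12 under Python's banker's
# rounding, so the stored duration would be 12 minutes.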
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=None ):
lowerCAmelCase = None
if token is not None:
lowerCAmelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
lowerCAmelCase = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
lowerCAmelCase = requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).json()
lowerCAmelCase = {}
try:
job_time.update({job['name']: extract_time_from_single_job(_UpperCAmelCase ) for job in result['jobs']} )
lowerCAmelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_UpperCAmelCase ):
lowerCAmelCase = requests.get(url + F'&page={i + 2}' , headers=_UpperCAmelCase ).json()
job_time.update({job['name']: extract_time_from_single_job(_UpperCAmelCase ) for job in result['jobs']} )
return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
__UpperCamelCase : Union[str, Any] = parser.parse_args()
__UpperCamelCase : Any = get_job_time(args.workflow_run_id)
__UpperCamelCase : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v['duration']}''')
| 367
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 309
| 0
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 )
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ):
lowerCAmelCase = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ):
lowerCAmelCase = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase = int(parent_a[1] * 100 ) + 1
lowerCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0]
lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
    # Verify that the target contains no genes besides the ones inside the genes variable.
lowerCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
    # Just some logs to know what the algorithm is doing.
lowerCAmelCase ,lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
        lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda x : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of the evolution.
lowerCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also compute small strings in
        # far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 368
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 309
| 0
|
"""simple docstring"""
__UpperCamelCase : int = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__UpperCamelCase : Dict = concatenate_datasets
__UpperCamelCase : Optional[int] = DownloadConfig
__UpperCamelCase : List[Any] = DownloadManager
__UpperCamelCase : List[str] = DownloadMode
__UpperCamelCase : Optional[Any] = DownloadConfig
__UpperCamelCase : Any = DownloadMode
__UpperCamelCase : Optional[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
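# Worked example (added for illustration, not in the original file): for
# [[4, 7], [2, 6]] the determinant is 4 * 6 - 7 * 2 = 10, so the expected
# inverse is [[0.6, -0.7], [-0.2, 0.4]].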
| 309
| 0
|
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a ( a__ ):
    # to overwrite in feature-extractor-specific tests
snake_case__ = None
snake_case__ = None
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_snake_case , 'feature_size' ) )
self.assertTrue(hasattr(_snake_case , 'sampling_rate' ) )
self.assertTrue(hasattr(_snake_case , 'padding_value' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCamelCase__ ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1E-3 ):
return False
return True
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = self.feat_extract_tester.seq_length_diff
lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase = self.feat_extract_tester.min_seq_length
lowerCAmelCase = self.feat_extract_tester.batch_size
lowerCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase = feat_extract.pad(_snake_case , padding=_snake_case )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )
lowerCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding='max_length' )[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=_snake_case , return_tensors='np' )
lowerCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase = feat_extract.pad(_snake_case , pad_to_multiple_of=10 )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , pad_to_multiple_of=10 )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=_snake_case )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=_snake_case , return_tensors='np' , )
lowerCAmelCase = input_a[input_name]
self.assertTrue(all(len(_snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def UpperCamelCase__ ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1E-3 ):
return False
return True
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_snake_case )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to smallest with np
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_snake_case , )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to middle
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_snake_case , return_tensors='np' , )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_snake_case )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding='longest' , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding='longest' , truncation=_snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding='max_length' , truncation=_snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase = 12
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , truncation=_snake_case , )
lowerCAmelCase = input_a[input_name]
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , )
lowerCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
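        # e.g. an input of length 23 with pad_to_multiple_of=12 rounds up to
        # (23 // 12 + 1) * 12 == 24 (added arithmetic note)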
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feat_extract_dict
lowerCAmelCase = True
lowerCAmelCase = self.feature_extraction_class(**_snake_case )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feat_extract_dict
lowerCAmelCase = True
lowerCAmelCase = self.feature_extraction_class(**_snake_case )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = min(_snake_case )
lowerCAmelCase = feat_extract.pad(
_snake_case , padding='max_length' , max_length=_snake_case , truncation=_snake_case , return_tensors='np' )
self.assertIn('attention_mask' , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__UpperCamelCase : List[str] = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
__UpperCamelCase : Tuple = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
__UpperCamelCase : Dict = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
__UpperCamelCase : Union[str, Any] = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
__UpperCamelCase : List[str] = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
__UpperCamelCase : List[Any] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
__UpperCamelCase : Union[str, Any] = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
lowerCAmelCase ,lowerCAmelCase = randrange(len(_UpperCAmelCase ) ), randrange(len(_UpperCAmelCase ) )
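    # (play >= oppo) + (play > oppo) evaluates to 0, 1 or 2, indexing
    # 'Loss', 'Tie' or 'Win'; e.g. equal indices give 1 + 0 == 1 -> 'Tie'.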
lowerCAmelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
lowerCAmelCase ,lowerCAmelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 100 ) -> Any:
return (generate_random_hand() for _ in range(_UpperCAmelCase ))
@pytest.mark.parametrize('hand, expected' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Dict:
assert PokerHand(_UpperCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
assert PokerHand(_UpperCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = PokerHand(_UpperCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
assert PokerHand(_UpperCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ) -> Optional[int]:
assert PokerHand(_UpperCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> Optional[Any]:
assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
lowerCAmelCase = [PokerHand(_UpperCAmelCase ) for hand in SORTED_HANDS]
lowerCAmelCase = poker_hands.copy()
shuffle(_UpperCAmelCase )
lowerCAmelCase = chain(sorted(_UpperCAmelCase ) )
for index, hand in enumerate(_UpperCAmelCase ):
assert hand == poker_hands[index]
def _SCREAMING_SNAKE_CASE () -> List[Any]:
# Test that five high straights are compared correctly.
lowerCAmelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=_UpperCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _SCREAMING_SNAKE_CASE () -> Dict:
    # Multiple calls to _is_five_high_straight should still return True
    # and should not keep mutating the hand's card list after the first call.
lowerCAmelCase = PokerHand('2C 4S AS 3D 5C' )
lowerCAmelCase = True
lowerCAmelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _SCREAMING_SNAKE_CASE () -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
lowerCAmelCase = 0
lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'poker_hands.txt' )
with open(_UpperCAmelCase ) as file_hand:
for line in file_hand:
lowerCAmelCase = line[:14].strip()
lowerCAmelCase = line[15:].strip()
lowerCAmelCase ,lowerCAmelCase = PokerHand(_UpperCAmelCase ), PokerHand(_UpperCAmelCase )
lowerCAmelCase = player.compare_with(_UpperCAmelCase )
if output == "Win":
answer += 1
assert answer == 376
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
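# Illustrative usage (added sketch, not part of this module; assumes the
# public `DistilBertTokenizerFast` export and network access):
#
#   from transformers import DistilBertTokenizerFast
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoded = tokenizer("Hello world", "Second segment")
#   print(encoded["input_ids"], encoded["attention_mask"])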
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 350
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
    # seed value
    lowerCAmelCase = [[]]  # the empty prefix has exactly one construction: the empty list
# iterate through the indices
for i in range(_UpperCAmelCase ):
        # only extend positions that are already constructible
if table[i] != []:
for word in word_bank:
                # check whether the word matches the target at this index
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
                    # add the word to every combination the current position holds
                    # and push each new combination to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
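# Worked example (added for illustration): with target "ab" and word bank
# ["a", "b", "ab"] the table evolves as
#   table[0] = [[]]                      (seed: the empty prefix)
#   table[1] = [["a"]]                   ("a" matches at index 0)
#   table[2] = [["ab"], ["b", "a"]]      ("ab" at index 0; "b" extends table[1])
# combinations are built in reverse, so after the final reversal the function
# returns [["ab"], ["a", "b"]].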
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309
| 0
|
"""simple docstring"""
import numpy as np
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : np.array ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
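# Note (added): 2 / (1 + e^(-2x)) - 1 == (1 - e^(-2x)) / (1 + e^(-2x)), which
# is exactly tanh(x); for example an input of 0 maps to 0, and
# np.allclose(_SCREAMING_SNAKE_CASE(v), np.tanh(v)) holds for any float array v.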
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
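# Example (added): _SCREAMING_SNAKE_CASE("ATCG") returns "TAGC", pairing
# A<->T and C<->G; any character outside ATCG raises ValueError.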
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 0
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=[1, 1, 2] , _snake_case=1 , _snake_case=32 , _snake_case=4 , _snake_case=8 , _snake_case=37 , _snake_case="gelu_new" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=5_12 , _snake_case=3 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=False , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = block_sizes
lowerCAmelCase = num_decoder_layers
lowerCAmelCase = d_model
lowerCAmelCase = n_head
lowerCAmelCase = d_head
lowerCAmelCase = d_inner
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = 2
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase = self.num_hidden_layers + 2
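        # e.g. with the default block_sizes=[1, 1, 2] and num_decoder_layers=1,
        # a full (non-base) model has 4 + 1 = 5 layers and 5 + 2 = 7 expected
        # hidden states (added arithmetic note, not from the original).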
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase = False
lowerCAmelCase = TFFunnelModel(config=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase = False
lowerCAmelCase = TFFunnelModel(config=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelBaseModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase = False
lowerCAmelCase = TFFunnelBaseModel(config=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase = False
lowerCAmelCase = TFFunnelBaseModel(config=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelForPreTraining(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelForMaskedLM(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFFunnelForSequenceClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = self.num_choices
lowerCAmelCase = TFFunnelForMultipleChoice(config=_snake_case )
lowerCAmelCase = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFFunnelForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelForQuestionAnswering(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFFunnelModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@require_tf
class a ( a__ , unittest.TestCase ):
snake_case__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFFunnelModelTester(self , base=_snake_case )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case )
| 352
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse using np.linspace()
__UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase : str = [0, 25, 50]
__UpperCamelCase : int = [25, 50, 75]
__UpperCamelCase : str = fuzz.membership.trimf(X, abca)
__UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca)
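    # trimf(X, [a, b, c]) builds a triangular membership function with feet at
    # a and c and a peak at b, so "young" is meant to peak at 25 and
    # "middle_aged" (built from the second triple above) at 50.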
# Compute the different operations using inbuilt functions.
__UpperCamelCase : Dict = np.ones(75)
__UpperCamelCase : str = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
__UpperCamelCase : Dict = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
__UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309
| 0
|
"""simple docstring"""
def binomial_coefficient(n: int , r: int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nC0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # compute the current row of Pascal's triangle from the previous one,
        # updating right-to-left so c[j - 1] still holds the previous row's value
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
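# Expected output: 252, the number of ways to choose 5 items from 10.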
| 353
|
"""simple docstring"""
def find_minimum_change(denominations: list[int] , value: str ) -> list[int]:
    total_value = int(value )
    # Initialize result
    answer = []
    # Traverse the denominations, largest first (assumes ascending input)
    for denomination in reversed(denominations ):
        # Greedily take as many coins of this denomination as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination )  # append the coin to the answer
    return answer
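# Note (added): the greedy strategy above is optimal for canonical coin
# systems such as the Indian denominations used below, but not in general;
# e.g. with denominations [1, 3, 4] and a value of 6 it yields [4, 1, 1]
# while the optimal answer is [3, 3].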
# Driver Code
if __name__ == "__main__":
__UpperCamelCase : Any = []
__UpperCamelCase : List[Any] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
__UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
    print(f'''Following is the minimal change for {value}: ''')
__UpperCamelCase : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 309
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
lowerCAmelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
lowerCAmelCase = DetaConfig(
backbone_config=_UpperCAmelCase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=_UpperCAmelCase , with_box_refine=_UpperCAmelCase , two_stage=_UpperCAmelCase , )
# set labels
lowerCAmelCase = 'huggingface/label-files'
if "o365" in model_name:
lowerCAmelCase = 366
lowerCAmelCase = 'object365-id2label.json'
else:
lowerCAmelCase = 91
lowerCAmelCase = 'coco-detection-id2label.json'
lowerCAmelCase = num_labels
lowerCAmelCase = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
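# A minimal helper of the kind these conversion scripts typically pair with
# the (old, new) lists built below; `rename_key` is an assumed name, not
# something shown in this excerpt.
def rename_key(state_dict: dict, old: str, new: str) -> None:
    # Move a checkpoint tensor from its original key to the HF-style key.
    state_dict[new] = state_dict.pop(old)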
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ):
lowerCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str ):
lowerCAmelCase = dct.pop(_UpperCAmelCase )
lowerCAmelCase = val
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ):
lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCAmelCase = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
lowerCAmelCase = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[:dim, :]
lowerCAmelCase = in_proj_bias[: dim]
lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
lowerCAmelCase = in_proj_weight[
-dim :, :
]
lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Any ):
# transformer decoder self-attention layers
lowerCAmelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
lowerCAmelCase = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[:hidden_size, :]
lowerCAmelCase = in_proj_bias[:hidden_size]
lowerCAmelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
lowerCAmelCase = in_proj_weight[-hidden_size:, :]
lowerCAmelCase = in_proj_bias[-hidden_size:]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Any ):
lowerCAmelCase = get_deta_config(_UpperCAmelCase )
# load original state dict
if model_name == "deta-swin-large":
lowerCAmelCase = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
lowerCAmelCase = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F'Model name {model_name} not supported' )
lowerCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )['model']
    # inspect the original state dict: print each parameter name and shape
for name, param in state_dict.items():
print(_UpperCAmelCase , param.shape )
# rename keys
lowerCAmelCase = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase = val
if "input_proj" in key:
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase = val
# finally, create HuggingFace model and load state dict
lowerCAmelCase = DetaForObjectDetection(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(_UpperCAmelCase )
# load image processor
lowerCAmelCase = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
lowerCAmelCase = prepare_img()
lowerCAmelCase = processor(images=_UpperCAmelCase , return_tensors='pt' )
lowerCAmelCase = encoding['pixel_values']
lowerCAmelCase = model(pixel_values.to(_UpperCAmelCase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowerCAmelCase = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
lowerCAmelCase = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
lowerCAmelCase = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
lowerCAmelCase = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_UpperCAmelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_UpperCAmelCase ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
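# A minimal post-conversion inference sketch. This is an assumption-laden
# example, not part of the original script: it presumes the conversion above
# already wrote a checkpoint to ./converted-deta-swin-large, e.g. via
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./converted-deta-swin-large
# (the script file name is assumed).
def run_converted_model_demo():
    from transformers import DetaForObjectDetection, DetaImageProcessor

    model = DetaForObjectDetection.from_pretrained('./converted-deta-swin-large')
    processor = DetaImageProcessor.from_pretrained('./converted-deta-swin-large')
    image = prepare_img()  # the COCO sample image helper defined above
    inputs = processor(images=image, return_tensors='pt')
    outputs = model(**inputs)
    # turn raw logits/boxes into thresholded detections in absolute pixel coords
    results = processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=[image.size[::-1]]
    )[0]
    print(results['scores'], results['labels'], results['boxes'])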
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
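# An end-to-end usage sketch mirroring the integration tests above. This is a
# sketch under the assumption that network access and the
# "facebook/esm2_t6_8M_UR50D" checkpoint are available; it is not run by the
# test suite.
def demo_esm_masked_lm():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
    model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
    # a short protein sequence; ESM tokenizes one residue per token
    inputs = tokenizer('MKTVRQERLKSIVRILERSKEPVSGAQ', return_tensors='tf')
    logits = model(**inputs).logits  # (1, sequence_length, vocab_size=33)
    print(logits.shape)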
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Dict = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
__UpperCamelCase : List[str] = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__UpperCamelCase : Optional[Any] = '''▁'''
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _snake_case , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case="[CLS]" , _snake_case="[SEP]" , _snake_case="<unk>" , _snake_case="[SEP]" , _snake_case="<pad>" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = (
AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case , normalized=_snake_case )
if isinstance(_snake_case , _snake_case )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if self.remove_space:
lowerCAmelCase = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('NFKD' , _snake_case )
lowerCAmelCase = ''.join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.preprocess_text(_snake_case )
lowerCAmelCase = self.sp_model.encode(_snake_case , out_type=_snake_case )
lowerCAmelCase = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = ''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_snake_case ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(_snake_case )
lowerCAmelCase = False
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
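# A minimal usage sketch for the tokenizer class above (renamed to `a` in this
# dump; upstream it is AlbertTokenizer). Assumes the "albert-base-v2"
# checkpoint is downloadable.
def demo_albert_tokenizer():
    from transformers import AlbertTokenizer

    tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
    # lowercasing + SentencePiece segmentation, e.g. ['▁hello', ',', '▁world', '!']
    print(tokenizer.tokenize('Hello, world!'))
    # encode() wraps the ids with [CLS] ... [SEP] as built above
    print(tokenizer.encode('Hello, world!'))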
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
        # mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
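# Sketch of the special-token layout built by the methods above (pure Python;
# the ids here are placeholders, real values come from vocab.txt):
def demo_special_token_layout():
    cls_id, sep_id = 1, 2
    seq_a, seq_b = [11, 12], [21, 22, 23]
    single = [cls_id] + seq_a + [sep_id]                           # [CLS] A [SEP]
    pair = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]  # [CLS] A [SEP] [SEP] B [SEP]
    # mirrors create_token_type_ids_from_sequences: 0s cover [CLS] A, 1s the rest
    token_type = [0] * (len(seq_a) + 1) + [1] * (len(seq_b) + 3)
    assert len(pair) == len(token_type)
    return single, pair, token_type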
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__UpperCamelCase : Optional[List[str]] = None
__UpperCamelCase : Any = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126, minus "|i1", whose values are not preserved correctly when saving and loading an image
__UpperCamelCase : Optional[int] = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class a :
snake_case__ = True
snake_case__ = None
# Automatically constructed
snake_case__ = '''PIL.Image.Image'''
snake_case__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
snake_case__ = field(default='''Image''' , init=a__ , repr=a__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = np.array(_snake_case )
if isinstance(_snake_case , _snake_case ):
return {"path": value, "bytes": None}
elif isinstance(_snake_case , _snake_case ):
return {"path": None, "bytes": value}
elif isinstance(_snake_case , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_snake_case )
elif isinstance(_snake_case , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_snake_case )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
lowerCAmelCase = {}
lowerCAmelCase ,lowerCAmelCase = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(_snake_case ):
lowerCAmelCase = PIL.Image.open(_snake_case )
else:
lowerCAmelCase = path.split('::' )[-1]
try:
lowerCAmelCase = string_to_dict(_snake_case , config.HUB_DATASETS_URL )['repo_id']
lowerCAmelCase = token_per_repo_id.get(_snake_case )
except ValueError:
lowerCAmelCase = None
with xopen(_snake_case , 'rb' , use_auth_token=_snake_case ) as f:
lowerCAmelCase = BytesIO(f.read() )
lowerCAmelCase = PIL.Image.open(bytes_ )
else:
lowerCAmelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def UpperCamelCase__ ( self ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.binary() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
lowerCAmelCase = storage.field('bytes' )
else:
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
lowerCAmelCase = storage.field('path' )
else:
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCAmelCase = pa.array(
[encode_np_array(np.array(_snake_case ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_snake_case , self.pa_type )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_snake_case ):
with xopen(_snake_case , 'rb' ) as f:
lowerCAmelCase = f.read()
return bytes_
lowerCAmelCase = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase = pa.array(
[os.path.basename(_snake_case ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_snake_case , self.pa_type )
def _SCREAMING_SNAKE_CASE ():
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCAmelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : "PIL.Image.Image" ):
lowerCAmelCase = BytesIO()
if image.format in list_image_compression_formats():
lowerCAmelCase = image.format
else:
lowerCAmelCase = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(_UpperCAmelCase , format=_UpperCAmelCase )
return buffer.getvalue()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : "PIL.Image.Image" ):
if hasattr(_UpperCAmelCase , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : np.ndarray ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
lowerCAmelCase = array.dtype
lowerCAmelCase = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
lowerCAmelCase = dtype.kind
lowerCAmelCase = dtype.itemsize
lowerCAmelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCAmelCase = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCAmelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCAmelCase = dtype_byteorder + dtype_kind + str(_UpperCAmelCase )
lowerCAmelCase = np.dtype(_UpperCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
lowerCAmelCase = PIL.Image.fromarray(array.astype(_UpperCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
lowerCAmelCase ,lowerCAmelCase = first_non_null_value(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCAmelCase , np.ndarray ):
lowerCAmelCase = no_op_if_value_is_null(_UpperCAmelCase )
return [obj_to_image_dict_func(_UpperCAmelCase ) for obj in objs]
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
lowerCAmelCase = no_op_if_value_is_null(_UpperCAmelCase )
return [obj_to_image_dict_func(_UpperCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
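# A minimal sketch of the numpy encode path above (requires Pillow; the helper
# name follows the call sites in this file):
def demo_encode_np_array():
    arr = (np.random.rand(16, 16, 3) * 255).astype('|u1')  # uint8 RGB image
    encoded = encode_np_array(arr)  # -> {"path": None, "bytes": b"\x89PNG..."}
    assert encoded['path'] is None and isinstance(encoded['bytes'], bytes)
    return encoded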
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
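# Example run (the script file name below is assumed; the {small,medium,large}
# "*_ft.pkl" checkpoints from the original DialoGPT release must already sit in
# --dialogpt_path):
#   python convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py --dialogpt_path .
# Each converted state dict is written to ./DialoGPT-<size>/pytorch_model.bin
# (WEIGHTS_NAME), with "lm_head.decoder.weight" renamed to "lm_head.weight".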
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class a ( a__ ):
snake_case__ = '''fnet'''
def __init__( self , _snake_case=3_20_00 , _snake_case=7_68 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu_new" , _snake_case=0.1 , _snake_case=5_12 , _snake_case=4 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=False , _snake_case=5_12 , _snake_case=3 , _snake_case=1 , _snake_case=2 , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = type_vocab_size
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_tpu_fourier_optimizations
lowerCAmelCase = tpu_short_seq_length
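# A minimal usage sketch via the canonical upstream class this file mirrors
# (the class above is renamed to `a` in this dump; upstream it is FNetConfig):
def demo_fnet_config():
    from transformers import FNetConfig

    config = FNetConfig(num_hidden_layers=6, tpu_short_seq_length=128)
    print(config.hidden_act, config.use_tpu_fourier_optimizations)  # gelu_new False
    return config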
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
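# Usage sketch for Kosaraju's algorithm above. An assumption: the three
# functions carry their upstream names topology_sort, find_components and
# strongly_connected_components, as their internal call sites suggest.
def demo_strongly_connected_components():
    graph = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
    print(strongly_connected_components(graph))  # [[0, 1, 2], [3], [4]]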
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 1000 ):
lowerCAmelCase = -1
lowerCAmelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowerCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowerCAmelCase = n - a - b
if c * c == (a * a + b * b):
lowerCAmelCase = a * b * c
if candidate >= product:
lowerCAmelCase = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
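# Sanity check helper (assumes the function above is bound to `solution`, as
# the __main__ call suggests). The unique Pythagorean triple with
# a + b + c = 1000 is (200, 375, 425), since 200**2 + 375**2 == 425**2.
def _check_solution():
    assert solution(1000) == 200 * 375 * 425  # 31875000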
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(_snake_case , _snake_case ):
try:
lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(_snake_case )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , _snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
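
# A minimal usage sketch for the dataset above; it mirrors the (deprecated)
# GlueDataset / GlueDataTrainingArguments pair shipped with transformers. The
# data_dir path is an assumption and must point at downloaded GLUE .tsv files.
from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
    task_name="mrpc",
    data_dir="./glue_data/MRPC",  # hypothetical path
    max_seq_length=128,
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer)  # defaults to the train split
print(len(train_dataset), train_dataset.get_labels())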
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(_snake_case , exist_ok=_snake_case )
lowerCAmelCase = os.path.join(_snake_case , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCAmelCase = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase = {'unk_token': '<unk>'}
lowerCAmelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(_snake_case , exist_ok=_snake_case )
lowerCAmelCase = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
lowerCAmelCase = dataset
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
lowerCAmelCase = os.path.join(self.tmpdirname , 'dataset' )
lowerCAmelCase = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _snake_case ) , )
return retriever
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
lowerCAmelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
lowerCAmelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(_snake_case , open(_snake_case , 'wb' ) )
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(_snake_case , np.ndarray )
lowerCAmelCase = retriever(
_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case , return_tensors='pt' , )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_snake_case , torch.Tensor )
self.assertIsInstance(_snake_case , torch.Tensor )
self.assertIsInstance(_snake_case , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
retriever.set_ctx_encoder_tokenizer(_snake_case )
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowerCAmelCase = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
self.assertEqual(
            len(_snake_case ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , _snake_case ) # check for doc token related keys in dictionary.
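
# Outside of these mocks, the usual entry point is the pretrained RAG release;
# a hedged sketch (this downloads weights, and use_dummy_dataset keeps the
# wiki index small enough for local experiments):
from transformers import RagRetriever

retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)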
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
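
# Illustrative output for a repository containing maths/prime_check.py (the
# exact indentation follows md_prefix above: depth 0 opens a "##" section,
# deeper depths become indented "*" bullets):
#
#   ## Maths
#     * [Prime Check](maths/prime_check.py)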
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any]=None ):
if subparsers is not None:
lowerCAmelCase = subparsers.add_parser('env' )
else:
lowerCAmelCase = argparse.ArgumentParser('Accelerate env command' )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help='The config file to use for the default values in the launching script.' )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
lowerCAmelCase = torch.__version__
lowerCAmelCase = torch.cuda.is_available()
lowerCAmelCase = is_xpu_available()
lowerCAmelCase = is_npu_available()
lowerCAmelCase = 'Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase ):
lowerCAmelCase = load_config_from_file(args.config_file ).to_dict()
lowerCAmelCase = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'PyTorch XPU available': str(_UpperCAmelCase ),
'PyTorch NPU available': str(_UpperCAmelCase ),
'System RAM': F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
}
if pt_cuda_available:
lowerCAmelCase = torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n' )
print('\n'.join([F'- {prop}: {val}' for prop, val in info.items()] ) )
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
lowerCAmelCase = (
'\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase )
else F'\t{accelerate_config}'
)
print(_UpperCAmelCase )
lowerCAmelCase = accelerate_config
return info
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = env_command_parser()
lowerCAmelCase = parser.parse_args()
env_command(_UpperCAmelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
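
# Typical invocations of the command defined above; the config path shown is
# illustrative, and any file written by `accelerate config` works:
#
#   $ accelerate env
#   $ python -m accelerate.commands.env --config_file path/to/config.yaml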
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase = g.get_repo('huggingface/diffusers' )
lowerCAmelCase = repo.get_issues(state='open' )
for issue in open_issues:
        lowerCAmelCase = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = inspect.getfile(accelerate.test_utils )
lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
lowerCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
print(F'Found {torch.cuda.device_count()} devices.' )
lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
print(F'Found {torch.cuda.device_count()} devices.' )
lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' )
lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = Accelerator()
__UpperCamelCase : Optional[int] = (accelerator.state.process_index + 2, 10)
__UpperCamelCase : Union[str, Any] = torch.randint(0, 10, shape).to(accelerator.device)
__UpperCamelCase : List[Any] = ''''''
__UpperCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__UpperCamelCase : Optional[int] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__UpperCamelCase : List[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
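
# A standalone sketch of the lazy-module idea used above (not the actual
# _LazyModule implementation): nothing is imported until an attribute is first
# accessed, which keeps the top-level package import cheap.
import importlib
import types


class LazySketchModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._origin = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):  # only called when normal lookup fails
        if attr not in self._origin:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._origin[attr], self.__name__)
        return getattr(submodule, attr)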
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : int ):
if isinstance(_UpperCAmelCase , torch.Tensor ):
return image
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
lowerCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
lowerCAmelCase = np.concatenate(_UpperCAmelCase , axis=0 )
        lowerCAmelCase = np.array(_UpperCAmelCase ).astype(np.float32 ) / 255.0
lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 )
lowerCAmelCase = 2.0 * image - 1.0
lowerCAmelCase = torch.from_numpy(_UpperCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
lowerCAmelCase = torch.cat(_UpperCAmelCase , dim=0 )
return image
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=0.9995 ):
if not isinstance(_UpperCAmelCase , np.ndarray ):
lowerCAmelCase = True
lowerCAmelCase = va.device
lowerCAmelCase = va.cpu().numpy()
lowerCAmelCase = va.cpu().numpy()
lowerCAmelCase = np.sum(va * va / (np.linalg.norm(_UpperCAmelCase ) * np.linalg.norm(_UpperCAmelCase )) )
if np.abs(_UpperCAmelCase ) > DOT_THRESHOLD:
lowerCAmelCase = (1 - t) * va + t * va
else:
lowerCAmelCase = np.arccos(_UpperCAmelCase )
lowerCAmelCase = np.sin(_UpperCAmelCase )
lowerCAmelCase = theta_a * t
lowerCAmelCase = np.sin(_UpperCAmelCase )
lowerCAmelCase = np.sin(theta_a - theta_t ) / sin_theta_a
lowerCAmelCase = sin_theta_t / sin_theta_a
lowerCAmelCase = sa * va + sa * va
if inputs_are_torch:
lowerCAmelCase = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase )
return va
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ):
lowerCAmelCase = F.normalize(_UpperCAmelCase , dim=-1 )
lowerCAmelCase = F.normalize(_UpperCAmelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
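
# slerp above interpolates along the great circle between two embeddings
# (falling back to plain lerp when they are nearly collinear), and
# spherical_dist_loss is proportional to the squared geodesic distance between
# unit vectors. A quick endpoint check with synthetic vectors (runs only when
# this module is executed directly):
if __name__ == "__main__":
    _va = torch.tensor([1.0, 0.0])
    _vb = torch.tensor([0.0, 1.0])
    assert torch.allclose(slerp(0.0, _va, _vb), _va, atol=1e-6)
    assert torch.allclose(slerp(1.0, _va, _vb), _vb, atol=1e-6)
    # the midpoint stays on the unit sphere, unlike plain linear interpolation
    assert abs(slerp(0.5, _va, _vb).norm().item() - 1.0) < 1e-4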
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ):
for param in model.parameters():
lowerCAmelCase = value
class a ( a__ ):
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_snake_case , text_encoder=_snake_case , clip_model=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , feature_extractor=_snake_case , coca_model=_snake_case , coca_tokenizer=_snake_case , coca_transform=_snake_case , )
lowerCAmelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , _snake_case )
else feature_extractor.size['shortest_edge']
)
lowerCAmelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _snake_case )
set_requires_grad(self.clip_model , _snake_case )
def UpperCamelCase__ ( self , _snake_case = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.enable_attention_slicing(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.vae , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.vae , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.unet , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.unet , _snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = min(int(num_inference_steps * strength ) , _snake_case )
lowerCAmelCase = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
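
    # Example: num_inference_steps=50 with strength=0.6 gives init_timestep=30
    # and t_start=20, so denoising covers only the last 30 scheduler timesteps;
    # higher strength re-noises, and therefore repaints, more of the input image.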
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None ):
"""simple docstring"""
if not isinstance(_snake_case , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(_snake_case )}' )
lowerCAmelCase = image.to(device=_snake_case , dtype=_snake_case )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_snake_case )
]
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
else:
lowerCAmelCase = self.vae.encode(_snake_case ).latent_dist.sample(_snake_case )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCAmelCase = 0.18_215 * init_latents
lowerCAmelCase = init_latents.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = randn_tensor(init_latents.shape , generator=_snake_case , device=_snake_case , dtype=_snake_case )
# get latents
lowerCAmelCase = self.scheduler.add_noise(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = init_latents
return latents
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.coca_transform(_snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCAmelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowerCAmelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.feature_extractor.preprocess(_snake_case )
lowerCAmelCase = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCAmelCase = self.clip_model.get_image_features(_snake_case )
lowerCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_snake_case )
lowerCAmelCase = image_embeddings_clip.repeat_interleave(_snake_case , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = latents.detach().requires_grad_()
lowerCAmelCase = self.scheduler.scale_model_input(_snake_case , _snake_case )
# predict the noise residual
lowerCAmelCase = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCAmelCase = self.scheduler.alphas_cumprod[timestep]
lowerCAmelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCAmelCase = torch.sqrt(_snake_case )
lowerCAmelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _snake_case ):
lowerCAmelCase = self.scheduler.sigmas[index]
lowerCAmelCase = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCAmelCase = 1 / 0.18_215 * sample
lowerCAmelCase = self.vae.decode(_snake_case ).sample
lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase = transforms.Resize(self.feature_extractor_size )(_snake_case )
lowerCAmelCase = self.normalize(_snake_case ).to(latents.dtype )
lowerCAmelCase = self.clip_model.get_image_features(_snake_case )
lowerCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_snake_case )
lowerCAmelCase = spherical_dist_loss(_snake_case , _snake_case ).mean() * clip_guidance_scale
lowerCAmelCase = -torch.autograd.grad(_snake_case , _snake_case )[0]
if isinstance(self.scheduler , _snake_case ):
lowerCAmelCase = latents.detach() + grads * (sigma**2)
lowerCAmelCase = noise_pred_original
else:
lowerCAmelCase = noise_pred_original - torch.sqrt(_snake_case ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = None , _snake_case = 5_12 , _snake_case = 5_12 , _snake_case = 0.6 , _snake_case = 50 , _snake_case = 7.5 , _snake_case = 1 , _snake_case = 0.0 , _snake_case = 1_00 , _snake_case = None , _snake_case = "pil" , _snake_case = True , _snake_case = 0.8 , _snake_case = 0.1 , _snake_case = 0.1 , ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ) and len(_snake_case ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(_snake_case )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(_snake_case , torch.Generator ) and batch_size > 1:
lowerCAmelCase = [generator] + [None] * (batch_size - 1)
lowerCAmelCase = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowerCAmelCase = [x[0] for x in coca_is_none if x[1]]
lowerCAmelCase = ', '.join(_snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_snake_case ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCAmelCase = self.get_image_description(_snake_case )
if style_prompt is None:
if len(_snake_case ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCAmelCase = self.get_image_description(_snake_case )
# get prompt text embeddings for content and style
lowerCAmelCase = self.tokenizer(
_snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_snake_case , return_tensors='pt' , )
lowerCAmelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCAmelCase = self.tokenizer(
_snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_snake_case , return_tensors='pt' , )
lowerCAmelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCAmelCase = slerp(_snake_case , _snake_case , _snake_case )
# duplicate text embeddings for each generation per prompt
lowerCAmelCase = text_embeddings.repeat_interleave(_snake_case , dim=0 )
# set timesteps
lowerCAmelCase = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCAmelCase = {}
if accepts_offset:
lowerCAmelCase = 1
self.scheduler.set_timesteps(_snake_case , **_snake_case )
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCAmelCase ,lowerCAmelCase = self.get_timesteps(_snake_case , _snake_case , self.device )
lowerCAmelCase = timesteps[:1].repeat(_snake_case )
# Preprocess image
lowerCAmelCase = preprocess(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = self.prepare_latents(
_snake_case , _snake_case , _snake_case , text_embeddings.dtype , self.device , _snake_case )
lowerCAmelCase = preprocess(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = self.prepare_latents(
_snake_case , _snake_case , _snake_case , text_embeddings.dtype , self.device , _snake_case )
lowerCAmelCase = slerp(_snake_case , _snake_case , _snake_case )
if clip_guidance_scale > 0:
lowerCAmelCase = self.get_clip_image_embeddings(_snake_case , _snake_case )
lowerCAmelCase = self.get_clip_image_embeddings(_snake_case , _snake_case )
lowerCAmelCase = slerp(
_snake_case , _snake_case , _snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase = content_text_input.input_ids.shape[-1]
lowerCAmelCase = self.tokenizer([''] , padding='max_length' , max_length=_snake_case , return_tensors='pt' )
lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCAmelCase = uncond_embeddings.repeat_interleave(_snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCAmelCase = torch.randn(_snake_case , generator=_snake_case , device='cpu' , dtype=_snake_case ).to(
self.device )
else:
lowerCAmelCase = torch.randn(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCAmelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it is ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase = {}
if accepts_eta:
lowerCAmelCase = eta
# check if the scheduler accepts generator
lowerCAmelCase = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCAmelCase = generator
with self.progress_bar(total=_snake_case ):
for i, t in enumerate(_snake_case ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase = self.scheduler.scale_model_input(_snake_case , _snake_case )
# predict the noise residual
lowerCAmelCase = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase ,lowerCAmelCase = noise_pred.chunk(2 )
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCAmelCase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCAmelCase ,lowerCAmelCase = self.cond_fn(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCAmelCase = 1 / 0.18_215 * latents
lowerCAmelCase = self.vae.decode(_snake_case ).sample
lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_snake_case , nsfw_content_detected=_snake_case )
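
# Standalone illustration of the classifier-free guidance combine used in the
# denoising loop above, with synthetic numbers in place of real noise
# predictions:
import torch

noise_pred_uncond = torch.tensor([0.1, 0.2])
noise_pred_text = torch.tensor([0.3, 0.6])
guidance_scale = 7.5
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([1.6000, 3.2000]): pushed well past the text prediction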
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
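
# The drop-in replacement the deprecation warning points to; the checkpoint id
# below is illustrative.
from transformers import PoolFormerImageProcessor

image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")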
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a ( a__ ):
snake_case__ = (DEISMultistepScheduler,)
snake_case__ = (('''num_inference_steps''', 2_5),)
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**_snake_case )
return config
def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase ,lowerCAmelCase = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , _snake_case=None , **_snake_case ):
"""simple docstring"""
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case , 'set_timesteps' ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case , 'set_timesteps' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=_snake_case )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=_snake_case )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='deis' , solver_order=_snake_case , solver_type=_snake_case , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
lowerCAmelCase = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case ).any(), "Samples have nan numbers"
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case )
self.check_over_configs(lower_order_final=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop(prediction_type='v_prediction' )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
        assert sample.dtype == torch.float16
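
# Sketch of the config round-trip the switching test above relies on: these
# multistep schedulers share enough of their config that any one can be rebuilt
# from another's.
from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

base = DEISMultistepScheduler(num_train_timesteps=1000)
for cls in (DPMSolverSinglestepScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler):
    scheduler = cls.from_config(base.config)
    scheduler.set_timesteps(10)
    print(type(scheduler).__name__, len(scheduler.timesteps))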
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population could be faster but is more memory-expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
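
# Worked example: evaluate("abc", "abd") matches two of the three positions,
# so it returns ("abc", 2.0).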
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 )
    lowerCAmelCase = parent_a[:random_slice] + parent_b[random_slice:]
    lowerCAmelCase = parent_b[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ):
lowerCAmelCase = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ):
lowerCAmelCase = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase = int(parent_a[1] * 100 ) + 1
lowerCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0]
lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ):
    # Verify that N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
    # Verify that the target contains no genes besides the ones in the genes list.
lowerCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
    # Just some logs to know what the algorithm is doing.
lowerCAmelCase ,lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping them avoids regression of the evolution.
lowerCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is the selection step.
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached its maximum size and, if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also solve small strings in
# far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 309
| 0
|
"""simple docstring"""
import pprint
import requests
__UpperCamelCase : Any = '''https://zenquotes.io/api'''
def _SCREAMING_SNAKE_CASE ():
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _SCREAMING_SNAKE_CASE ():
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
__UpperCamelCase : Any = random_quotes()
pprint.pprint(response)
| 364
|
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = ''
lowerCAmelCase = ''
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 2_56
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = cva.imread(_snake_case , 0 )
lowerCAmelCase = copy.deepcopy(self.img )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
lowerCAmelCase = np.sum(_snake_case )
for i in range(len(_snake_case ) ):
lowerCAmelCase = x[i] / self.k
self.sk += prk
lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase = last % 1  # fractional part of last, for the round-half-up below
lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_snake_case )
lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ ( self ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
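# First Kosaraju pass: depth-first search that appends each vertex after all of
# its descendants (post-order).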
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
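# Kosaraju's algorithm: one DFS pass over the original graph records a
# post-order, then a second DFS pass over the reversed graph, taken in reverse
# post-order, yields one strongly connected component per tree.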
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
| 365
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__UpperCamelCase : Any = TypeVar('''T''')
__UpperCamelCase : Dict = Union[List[T], Tuple[T, ...]]
__UpperCamelCase : str = Union[T, List[T], Dict[str, T]]
__UpperCamelCase : Optional[Any] = Union[str, bytes, os.PathLike]
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCAmelCase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
| 309
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( a__ ):
snake_case__ = (IPNDMScheduler,)
snake_case__ = (('''num_inference_steps''', 5_0),)
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = {'num_train_timesteps': 10_00}
config.update(**_snake_case )
return config
def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case , 'set_timesteps' ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case , 'set_timesteps' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case , time_step=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_snake_case , time_step=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 367
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 309
| 0
|
import collections
import importlib.util
import os
import re
from pathlib import Path
__UpperCamelCase : int = '''src/transformers'''
# Matches is_xxx_available()
__UpperCamelCase : str = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__UpperCamelCase : Tuple = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCamelCase : Optional[Any] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__UpperCamelCase : int = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__UpperCamelCase : Optional[Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCamelCase : Dict = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCamelCase : Optional[Any] = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCamelCase : Optional[int] = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__UpperCamelCase : Dict = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__UpperCamelCase : Any = re.compile(R'''^\s*try:''')
# Catches a line with else:
__UpperCamelCase : Union[str, Any] = re.compile(R'''^\s*else:''')
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
if _re_test_backend.search(_UpperCAmelCase ) is None:
return None
lowerCAmelCase = [b[0] for b in _re_backend.findall(_UpperCAmelCase )]
backends.sort()
return "_and_".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = 0
while line_index < len(_UpperCAmelCase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_UpperCAmelCase ):
lowerCAmelCase = _re_one_line_import_struct.search(_UpperCAmelCase ).groups()[0]
lowerCAmelCase = re.findall(R'\[([^\]]+)\]' , _UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowerCAmelCase = _re_import_struct_key_value.search(_UpperCAmelCase )
if single_line_import_search is not None:
lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_UpperCAmelCase ) > 0]
objects.extend(_UpperCAmelCase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowerCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(_UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_UpperCAmelCase ) is not None:
lowerCAmelCase = _re_import_struct_add_many.search(_UpperCAmelCase ).groups()[0].split(', ' )
lowerCAmelCase = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0]
objects.extend(_UpperCAmelCase )
elif _re_between_brackets.search(_UpperCAmelCase ) is not None:
lowerCAmelCase = _re_between_brackets.search(_UpperCAmelCase ).groups()[0].split(', ' )
lowerCAmelCase = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0]
objects.extend(_UpperCAmelCase )
elif _re_quote_object.search(_UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(_UpperCAmelCase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase = []
while (
line_index < len(_UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowerCAmelCase = lines[line_index]
lowerCAmelCase = _re_import.search(_UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(_UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowerCAmelCase = lines[line_index]
lowerCAmelCase = _re_import.search(_UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
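# Compare the two halves of an init: flag duplicate entries within each backend
# and any object listed on one side but missing from the other.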
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
def find_duplicates(_UpperCAmelCase : Any ):
return [k for k, v in collections.Counter(_UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase = []
for key in import_dict_objects.keys():
lowerCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
lowerCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase = 'base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = []
for root, _, files in os.walk(_UpperCAmelCase ):
if "__init__.py" in files:
lowerCAmelCase = os.path.join(_UpperCAmelCase , '__init__.py' )
lowerCAmelCase = parse_init(_UpperCAmelCase )
if objects is not None:
lowerCAmelCase = analyze_results(*_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
lowerCAmelCase = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) > 0:
raise ValueError('\n\n'.join(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = []
for path, directories, files in os.walk(_UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(_UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_UpperCAmelCase ) / folder).glob('*.py' ) ) ) == 0:
continue
lowerCAmelCase = str((Path(_UpperCAmelCase ) / folder).relative_to(_UpperCAmelCase ) )
lowerCAmelCase = short_path.replace(os.path.sep , '.' )
submodules.append(_UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase = str((Path(_UpperCAmelCase ) / fname).relative_to(_UpperCAmelCase ) )
lowerCAmelCase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(_UpperCAmelCase )
return submodules
__UpperCamelCase : Union[str, Any] = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _SCREAMING_SNAKE_CASE ():
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(_UpperCAmelCase , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase = spec.loader.load_module()
lowerCAmelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_UpperCAmelCase ) > 0:
lowerCAmelCase = '\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 368
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 309
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class a ( a__ ):
snake_case__ = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
snake_case__ = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
snake_case__ = field(default=a__ , metadata={'''help''': '''whether to use adafactor'''} )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
snake_case__ = field(default=a__ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
snake_case__ = field(
default='''linear''' , metadata={'''help''': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
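# Inverse via the adjugate: inv(A) = adj(A) / det(A). Decimal is used for the
# intermediate products to limit floating-point error; only 2x2 and 3x3
# matrices are supported.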
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_UpperCAmelCase )
# Calculate the inverse of the matrix
return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 309
| 0
|
"""simple docstring"""
import math
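# Trial division primality test: after the small cases, it suffices to test odd
# divisors up to sqrt(number).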
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowerCAmelCase = range(3 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
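# Search for the nearest prime starting from factor * value, stepping upward by
# default or downward when desc=True; a descending search that returns to its
# starting value restarts upward from value + 1.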
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : List[str]=1 , **_UpperCAmelCase : Tuple ):
lowerCAmelCase = factor * value
lowerCAmelCase = value
while not is_prime(_UpperCAmelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_UpperCAmelCase )
return value
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=4 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_choices
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_attention_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( a__ , unittest.TestCase ):
snake_case__ = True
snake_case__ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxBertModelTester(self )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxBertModel.from_pretrained('bert-base-cased' )
lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_snake_case )
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
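# If the serialized normalizer state disagrees with the constructor arguments,
# rebuild the normalizer so the requested lowercasing, accent stripping, and
# Chinese-character handling take effect.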
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 309
| 0
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCamelCase : Optional[Any] = '''scheduler_config.json'''
class a ( a__ ):
snake_case__ = 1
snake_case__ = 2
snake_case__ = 3
snake_case__ = 4
snake_case__ = 5
@dataclass
class a ( a__ ):
snake_case__ = 4_2
class a :
snake_case__ = SCHEDULER_CONFIG_NAME
snake_case__ = ['''dtype''']
snake_case__ = []
snake_case__ = True
@classmethod
def UpperCamelCase__ ( cls , _snake_case = None , _snake_case = None , _snake_case=False , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=_snake_case , subfolder=_snake_case , return_unused_kwargs=_snake_case , **_snake_case , )
lowerCAmelCase ,lowerCAmelCase = cls.from_config(_snake_case , return_unused_kwargs=_snake_case , **_snake_case )
if hasattr(_snake_case , 'create_state' ) and getattr(_snake_case , 'has_state' , _snake_case ):
lowerCAmelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , **_snake_case ):
"""simple docstring"""
self.save_config(save_directory=_snake_case , push_to_hub=_snake_case , **_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase = importlib.import_module(__name__.split('.' )[0] )
lowerCAmelCase = [
getattr(_snake_case , _snake_case ) for c in compatible_classes_str if hasattr(_snake_case , _snake_case )
]
return compatible_classes
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Tuple[int] ):
assert len(_UpperCAmelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCAmelCase ) - x.ndim) ) , _UpperCAmelCase )
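# Build a beta schedule whose cumulative product of (1 - beta) tracks
# alpha_bar(t); alpha_bar is the squared-cosine ("Glide") schedule and each
# beta is capped at the given maximum (0.999 by default).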
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any]=0.999 , _UpperCAmelCase : Union[str, Any]=jnp.floataa ):
def alpha_bar(_UpperCAmelCase : Optional[Any] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowerCAmelCase = []
for i in range(_UpperCAmelCase ):
lowerCAmelCase = i / num_diffusion_timesteps
lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCAmelCase ) / alpha_bar(_UpperCAmelCase ) , _UpperCAmelCase ) )
return jnp.array(_UpperCAmelCase , dtype=_UpperCAmelCase )
@flax.struct.dataclass
class a :
snake_case__ = 4_2
snake_case__ = 4_2
snake_case__ = 4_2
@classmethod
def UpperCamelCase__ ( cls , _snake_case ):
"""simple docstring"""
lowerCAmelCase = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
lowerCAmelCase = 1.0 - betas
lowerCAmelCase = jnp.cumprod(_snake_case , axis=0 )
return cls(
alphas=_snake_case , betas=_snake_case , alphas_cumprod=_snake_case , )
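# get_sqrt_alpha_prod broadcasts sqrt(alphas_cumprod[t]) and
# sqrt(1 - alphas_cumprod[t]) to the sample shape; add_noise then forms the
# forward-diffusion sample sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# and get_velocity forms sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample.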
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : CommonSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray ):
lowerCAmelCase = state.alphas_cumprod
lowerCAmelCase = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase = sqrt_alpha_prod.flatten()
lowerCAmelCase = broadcast_to_shape_from_left(_UpperCAmelCase , original_samples.shape )
lowerCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase = sqrt_one_minus_alpha_prod.flatten()
lowerCAmelCase = broadcast_to_shape_from_left(_UpperCAmelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : CommonSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray ):
lowerCAmelCase ,lowerCAmelCase = get_sqrt_alpha_prod(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : CommonSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray ):
lowerCAmelCase ,lowerCAmelCase = get_sqrt_alpha_prod(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 350
|
"""simple docstring"""
from __future__ import annotations
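# Tabulation DP: table[i] holds every way to build target[:i] from the word
# bank, so table[len(target)] ends up holding all complete constructions.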
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # the empty prefix has exactly one construction: the empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309
| 0
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class a ( a__ ):
snake_case__ = '''MCTCTFeatureExtractor'''
snake_case__ = '''AutoTokenizer'''
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
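# __call__ routes `audio` to the feature extractor and `text` to the tokenizer;
# when both are given, the tokenized input ids are attached to the audio
# features as labels.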
def __call__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_snake_case , **_snake_case )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCAmelCase = kwargs.pop('raw_speech' )
else:
lowerCAmelCase = kwargs.pop('audio' , _snake_case )
lowerCAmelCase = kwargs.pop('sampling_rate' , _snake_case )
lowerCAmelCase = kwargs.pop('text' , _snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCAmelCase = self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case )
if text is not None:
lowerCAmelCase = self.tokenizer(_snake_case , **_snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase = encodings['input_ids']
return inputs
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*_snake_case , **_snake_case )
lowerCAmelCase = kwargs.pop('input_features' , _snake_case )
lowerCAmelCase = kwargs.pop('labels' , _snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if input_features is not None:
lowerCAmelCase = self.feature_extractor.pad(_snake_case , *_snake_case , **_snake_case )
if labels is not None:
lowerCAmelCase = self.tokenizer.pad(_snake_case , **_snake_case )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCAmelCase = labels['input_ids']
return input_features
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@contextmanager
def UpperCamelCase__ ( self ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call).' )
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer
yield
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
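# (added) Minimal self-contained sketch of the swap-and-restore pattern that
# `as_target_processor` implements above. `_SwapDemo` and its attribute names
# are illustrative only and not part of the transformers API; it reuses the
# `contextmanager` import from the top of this file.
class _SwapDemo:
    def __init__(self , default , target ):
        self.current = default
        self._default , self._target = default , target

    @contextmanager
    def as_target(self ):
        self.current = self._target  # route subsequent calls to the target processor
        yield
        self.current = self._default  # always restore the default on exit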
| 351
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
    return _UpperCAmelCase.translate(str.maketrans('ATCG' , 'TAGC' ) )
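# Example (added): complementing 'ATCG' yields 'TAGC'; any character outside
# A/T/C/G raises ValueError('Invalid Strand').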
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 0
|
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ):
if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
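    # "Gravity" passes: whenever a bead count is larger than the one below it,
    # the excess falls one position down; len(sequence) passes are enough to
    # leave the list sorted in ascending order.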
for _ in range(len(_UpperCAmelCase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(_UpperCAmelCase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 352
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
    __UpperCamelCase : str = [0, 25, 50]
    __UpperCamelCase : int = [25, 50, 75]
    __UpperCamelCase : str = fuzz.membership.trimf(X, abca)
    # the second set must use the second parameter triple (abcb), otherwise
    # both membership functions would be identical
    __UpperCamelCase : Tuple = fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
__UpperCamelCase : Dict = np.ones(75)
__UpperCamelCase : str = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
__UpperCamelCase : Dict = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
__UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
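    # Illustrative sketch (added; not in the original script): max-min and
    # max-product composition of two hypothetical fuzzy relations R1 (3x4)
    # and R2 (4x2), computed with plain numpy broadcasting.
    R1 = np.random.rand(3, 4)
    R2 = np.random.rand(4, 2)
    max_min_composition = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
    max_product_composition = np.max(R1[:, :, None] * R2[None, :, :], axis=1)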
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 353
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ):
lowerCAmelCase = int(_UpperCAmelCase )
# Initialize Result
lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_UpperCAmelCase ):
        # Take this denomination as long as it still fits into the remaining value
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # record the denomination used
return answer
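# (added note) this greedy scan is optimal for canonical coin systems such as
# Indian currency, but it can fail for arbitrary denomination sets: with
# denominations [1, 3, 4] and value 6 it returns [4, 1, 1] instead of [3, 3].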
# Driver Code
if __name__ == "__main__":
__UpperCamelCase : Any = []
__UpperCamelCase : List[Any] = '''0'''
if (
    input('''Do you want to enter your denominations ? (y/n): ''').strip().lower()
== "y"
):
__UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip()
    if int(value) <= 0:
print('''The total value cannot be zero or negative.''')
else:
print(f'''Following is minimal change for {value}: ''')
__UpperCamelCase : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 309
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCamelCase : int = random.Random()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=1.0 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
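# (added note) referenced below as `floats_list`: returns a shape[0] x shape[1]
# nested list of uniform random floats in [0, scale), drawn from `rng` when one
# is provided and from the module-level `global_rng` otherwise.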
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=4_00 , _snake_case=20_00 , _snake_case=1 , _snake_case=0.0 , _snake_case=1_60_00 , _snake_case=True , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self , _snake_case=False , _snake_case=False ):
"""simple docstring"""
def _flatten(_snake_case ):
return list(itertools.chain(*_snake_case ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(_snake_case ) for x in speech_inputs]
return speech_inputs
class a ( a__ , unittest.TestCase ):
snake_case__ = WavaVecaFeatureExtractor
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = WavaVecaFeatureExtractionTester(self )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(_snake_case , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_snake_case , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase = [np.asarray(_snake_case ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(_snake_case , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(_snake_case , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase = np.asarray(_snake_case )
lowerCAmelCase = feat_extract(_snake_case , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(_snake_case , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase = [None, 16_00, None]
for max_length, padding in zip(_snake_case , _snake_case ):
lowerCAmelCase = feat_extract(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
            self.assertTrue(input_values[1][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = range(8_00 , 14_00 , 2_00 )
lowerCAmelCase = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase = [None, 16_00, None]
for max_length, padding in zip(_snake_case , _snake_case ):
lowerCAmelCase = feat_extract(_snake_case , max_length=_snake_case , padding=_snake_case )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase = feat_extract(
_snake_case , truncation=_snake_case , max_length=10_00 , padding='max_length' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase = feat_extract(
_snake_case , truncation=_snake_case , max_length=10_00 , padding='longest' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase = feat_extract(
_snake_case , truncation=_snake_case , max_length=20_00 , padding='longest' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_00 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase = WavaVecaConfig.from_pretrained(_snake_case )
lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_snake_case )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
| 354
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__UpperCamelCase : Union[str, Any] = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
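# (added note) the `_LazyModule` replacement defers importing
# `tokenization_tapex` until `TapexTokenizer` is first accessed, while the
# TYPE_CHECKING branch above keeps static analyzers aware of the symbol.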
| 355
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
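    # (added note) `tokenize` post-processes the SentencePiece pieces: a bare
    # '▁' marker is merged into the piece that follows it, and each piece is
    # split further at CJK characters, punctuation marks, and digit/non-digit
    # boundaries so that every such character becomes its own token.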
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
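    # (added note) the vocab file is assumed to hold one token per line; the
    # 0-based line number becomes the token id, matching the SentencePiece
    # fallback mapping built in `__init__`.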
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
| 309
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = StableDiffusionControlNetImgaImgPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(_snake_case )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = 2
lowerCAmelCase = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , )
lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((64, 64) )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = StableDiffusionControlNetImgaImgPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )  # in-place init; `normal` is the deprecated alias
                m.bias.data.fill_(1.0 )
lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case )
torch.manual_seed(0 )
lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case )
torch.manual_seed(0 )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(_snake_case )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = 2
lowerCAmelCase = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ),
]
lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((64, 64) )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
lowerCAmelCase = 10.0
lowerCAmelCase = 4
        lowerCAmelCase = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase = steps
        lowerCAmelCase = scale
        output_a = pipe(**_snake_case )[0]
        lowerCAmelCase = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase = steps
        lowerCAmelCase = scale
        output_b = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        lowerCAmelCase = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase = steps
        lowerCAmelCase = scale
        output_c = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        lowerCAmelCase = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase = steps
        lowerCAmelCase = scale
        output_d = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different from one another
        assert np.sum(np.abs(output_a - output_b ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1E-3
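        # (added note) control_guidance_start / control_guidance_end select the
        # fraction of the denoising schedule during which each ControlNet is
        # active, so the four runs condition over different step windows and
        # their outputs must differ.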
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_snake_case )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=_snake_case , controlnet=_snake_case )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase = 'evil space-punk bird'
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) )
lowerCAmelCase = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) )
lowerCAmelCase = pipe(
_snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='np' , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
| 356
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
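# (added note) each DialoGPT pickle stores the tied LM-head weight under
# 'lm_head.decoder.weight'; transformers expects 'lm_head.weight', so the
# conversion pops the old key, re-keys the tensor, and saves the state dict
# as WEIGHTS_NAME inside the dump folder.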
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ : int = '''#'''
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = {}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self._trie
for char in text:
if char not in trie:
lowerCAmelCase = {}
lowerCAmelCase = trie[char]
lowerCAmelCase = True
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self._trie
for char in prefix:
if char in trie:
lowerCAmelCase = trie[char]
else:
return []
return self._elements(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for c, v in d.items():
lowerCAmelCase = [' '] if c == END else [(c + s) for s in self._elements(_snake_case )]
result.extend(_snake_case )
return tuple(_snake_case )
SCREAMING_SNAKE_CASE_ : Dict = Trie()
SCREAMING_SNAKE_CASE_ : List[str] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = trie.find_word(_UpperCAmelCase )
return tuple(string + word for word in suffixes )
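# (added example) autocomplete_using_trie('de') returns the stored words that
# extend the prefix, each with a trailing space appended when the '#'
# end-marker is reached: ('depart ', 'detergent ', 'deer ', 'deal ').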
def _SCREAMING_SNAKE_CASE ():
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 357
|
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
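# The two passes above are Kosaraju's algorithm: a post-order DFS on the graph,
# then DFS on the reversed graph in decreasing finish time. A readable
# restatement under the same adjacency-list representation (vertices 0..n-1;
# recursive DFS, so intended for small graphs):
def kosaraju(graph: dict) -> list:
    n = len(graph)
    reversed_graph = {v: [] for v in range(n)}
    for v, neighbours in graph.items():
        for u in neighbours:
            reversed_graph[u].append(v)

    visited = [False] * n
    order = []

    def dfs(v: int) -> None:
        visited[v] = True
        for u in graph[v]:
            if not visited[u]:
                dfs(u)
        order.append(v)  # post-order: v finishes after its descendants

    for v in range(n):
        if not visited[v]:
            dfs(v)

    visited = [False] * n
    components = []

    def collect(v: int, component: list) -> None:
        visited[v] = True
        component.append(v)
        for u in reversed_graph[v]:
            if not visited[u]:
                collect(u, component)

    for v in reversed(order):  # decreasing finish time
        if not visited[v]:
            component = []
            collect(v, component)
            components.append(component)
    return components

# kosaraju({0: [1], 1: [2], 2: [0, 3], 3: []})  # -> [[0, 2, 1], [3]]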
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : str = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
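# The _import_structure/_LazyModule pattern above defers heavy submodule imports
# until an exported name is first touched. A rough, framework-free sketch of the
# same idea (illustrative names, not transformers internals):
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        module = importlib.import_module(f'.{module_name}', self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value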
| 358
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(_snake_case , _snake_case ):
try:
lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(_snake_case )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , _snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
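# The cache branch above follows a common pattern: build features once, persist
# them with torch.save, and serialize access with a file lock so concurrent
# processes don't race to rebuild. A stripped-down sketch (the cache path and
# build function are placeholders):
import os
import torch
from filelock import FileLock

def load_or_build(cache_file: str, build_fn):
    with FileLock(cache_file + '.lock'):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        features = build_fn()  # runs in exactly one process; others wait on the lock
        torch.save(features, cache_file)
        return features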
| 309
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
__UpperCamelCase : Tuple = {'''bert_for_seq_generation''': 512}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = []
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<::::>" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , sep_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.piece_to_id(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.sp_model.IdToPiece(_snake_case )
return token
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case ) + token
lowerCAmelCase = []
else:
current_sub_tokens.append(_snake_case )
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
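# The __getstate__/__setstate__ pair above exists because the SentencePiece
# processor is a C++ handle that cannot be pickled; only the model file path
# survives pickling and the processor is rebuilt on load. A minimal sketch of
# the idiom:
import sentencepiece as spm

class PicklableSPTokenizer:
    def __init__(self, vocab_file: str):
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None  # drop the unpicklable handle
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)  # rebuild from the saved path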
| 359
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 309
| 0
|
import math
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
lowerCAmelCase = []
lowerCAmelCase = 2
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) # Size of every segment
lowerCAmelCase = [True] * (end + 1)
lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(_UpperCAmelCase )
for i in range(start * start , end + 1 , _UpperCAmelCase ):
lowerCAmelCase = False
start += 1
prime += in_prime
lowerCAmelCase = end + 1
lowerCAmelCase = min(2 * end , _UpperCAmelCase )
while low <= n:
lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_UpperCAmelCase , high + 1 , _UpperCAmelCase ):
lowerCAmelCase = False
for j in range(len(_UpperCAmelCase ) ):
if temp[j] is True:
prime.append(j + low )
lowerCAmelCase = high + 1
lowerCAmelCase = min(high + end , _UpperCAmelCase )
return prime
print(sieve(10**6))
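# A cleaned-up sketch of the segmented-sieve idea implemented above: sieve
# primes up to sqrt(n) first, then mark multiples of those primes inside
# successive windows of width sqrt(n), so memory stays O(sqrt(n)).
import math

def segmented_sieve(n: int) -> list:
    limit = int(math.sqrt(n))
    base = [True] * (limit + 1)
    primes = []
    for p in range(2, limit + 1):
        if base[p]:
            primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                base[multiple] = False

    result = primes.copy()
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:
        window = [True] * (high - low + 1)
        for p in primes:
            start = max(p * p, (low + p - 1) // p * p)  # first multiple of p in [low, high]
            for multiple in range(start, high + 1, p):
                window[multiple - low] = False
        result.extend(low + i for i, flag in enumerate(window) if flag)
        low, high = high + 1, min(high + limit, n)
    return result

# segmented_sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]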
| 360
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase = g.get_repo('huggingface/diffusers' )
lowerCAmelCase = repo.get_issues(state='open' )
for issue in open_issues:
lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase )
lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
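# The branching above reduces to timestamp arithmetic per issue. A tiny,
# dependency-free sketch of the "ready for a stale notice" predicate it encodes
# (23 days since the last update, 30 days since creation):
from datetime import datetime
from typing import Optional

def ready_for_stale_notice(updated_at: datetime, created_at: datetime, now: Optional[datetime] = None) -> bool:
    now = now or datetime.utcnow()
    return (now - updated_at).days > 23 and (now - created_at).days >= 30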
| 309
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
if weight_type is not None:
lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase = []
lowerCAmelCase = fairseq_model.state_dict()
lowerCAmelCase = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(_UpperCAmelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , _UpperCAmelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "bias" in name:
lowerCAmelCase = 'bias'
else:
lowerCAmelCase = None
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
continue
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] ):
lowerCAmelCase = full_name.split('conv_layers.' )[-1]
lowerCAmelCase = name.split('.' )
lowerCAmelCase = int(items[0] )
lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCAmelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_UpperCAmelCase )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Any=True ):
if config_path is not None:
lowerCAmelCase = HubertConfig.from_pretrained(_UpperCAmelCase )
else:
lowerCAmelCase = HubertConfig()
if is_finetuned:
if dict_path:
lowerCAmelCase = Dictionary.load(_UpperCAmelCase )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad>
# and not <s> as in fairseq
lowerCAmelCase = target_dict.pad_index
lowerCAmelCase = target_dict.bos_index
lowerCAmelCase = target_dict.eos_index
lowerCAmelCase = len(target_dict.symbols )
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _UpperCAmelCase )
lowerCAmelCase = WavaVecaCTCTokenizer(
_UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , )
lowerCAmelCase = True if config.feat_extract_norm == 'layer' else False
lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
lowerCAmelCase = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
lowerCAmelCase = HubertForCTC(_UpperCAmelCase )
else:
lowerCAmelCase = HubertModel(_UpperCAmelCase )
if is_finetuned:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCAmelCase = model[0].eval()
recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
hf_wavavec.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
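# Mapping fairseq names onto HF modules above relies on resolving dotted key
# paths like 'encoder.layers.0.attention.k_proj.weight' into nested attributes.
# A small sketch of that traversal in plain Python:
import functools

def get_by_dotted_path(obj, dotted: str):
    return functools.reduce(getattr, dotted.split('.'), obj)

def set_by_dotted_path(obj, dotted: str, value) -> None:
    *parents, leaf = dotted.split('.')
    setattr(functools.reduce(getattr, parents, obj), leaf, value)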
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
lowerCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase = {'unk_token': '<unk>'}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
lowerCAmelCase = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase = os.path.join(self.tmpdirname , _snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
lowerCAmelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(_snake_case , return_tensors='np' )
lowerCAmelCase = processor(images=_snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = processor(text=_snake_case )
lowerCAmelCase = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(images=_snake_case , visual_prompt=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPSegProcessor(tokenizer=_snake_case , image_processor=_snake_case )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(_snake_case )
lowerCAmelCase = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
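# The processor under test is essentially a thin composition of a tokenizer and
# an image processor: text goes to one, images to the other, and the results are
# merged. A schematic of that delegation (simplified; not the real class):
class ComposedProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images.')
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding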
| 362
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCamelCase : Dict = 0
__UpperCamelCase : Optional[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCamelCase : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCamelCase : Any = tuple[int, int]
class a :
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = pos_x
lowerCAmelCase = pos_y
lowerCAmelCase = (pos_y, pos_x)
lowerCAmelCase = goal_x
lowerCAmelCase = goal_y
lowerCAmelCase = g_cost
lowerCAmelCase = parent
lowerCAmelCase = self.calculate_heuristic()
lowerCAmelCase = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.pos_x - self.goal_x
lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_snake_case ) + abs(_snake_case )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , _snake_case ):
"""simple docstring"""
return self.f_cost < other.f_cost
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _snake_case )
lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _snake_case )
lowerCAmelCase = [self.start]
lowerCAmelCase = []
lowerCAmelCase = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_snake_case )
self.closed_nodes.append(_snake_case )
lowerCAmelCase = self.get_successors(_snake_case )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_snake_case )
else:
# retrieve the best current path
lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(_snake_case ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_snake_case )
else:
self.open_nodes.append(_snake_case )
return [self.start.pos]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for action in delta:
lowerCAmelCase = parent.pos_x + action[1]
lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _snake_case , ) )
return successors
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = node
lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase = current_node.parent
path.reverse()
return path
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = AStar(_snake_case , _snake_case )
lowerCAmelCase = AStar(_snake_case , _snake_case )
lowerCAmelCase = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_snake_case , _snake_case )
self.fwd_astar.closed_nodes.append(_snake_case )
self.bwd_astar.closed_nodes.append(_snake_case )
lowerCAmelCase = current_bwd_node
lowerCAmelCase = current_fwd_node
lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(_snake_case ),
self.bwd_astar: self.bwd_astar.get_successors(_snake_case ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_snake_case )
else:
# retrieve the best current path
lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(_snake_case ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_snake_case )
else:
astar.open_nodes.append(_snake_case )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.fwd_astar.retrace_path(_snake_case )
lowerCAmelCase = self.bwd_astar.retrace_path(_snake_case )
bwd_path.pop()
bwd_path.reverse()
lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCamelCase : int = (0, 0)
__UpperCamelCase : str = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCamelCase : List[str] = time.time()
__UpperCamelCase : Optional[int] = AStar(init, goal)
__UpperCamelCase : int = a_star.search()
__UpperCamelCase : Any = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCamelCase : List[str] = time.time()
__UpperCamelCase : Dict = BidirectionalAStar(init, goal)
__UpperCamelCase : Optional[Any] = bidir_astar.search()
__UpperCamelCase : List[str] = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
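# The heuristic switch above picks Manhattan (HEURISTIC == 1) or Euclidean
# distance. As a worked check on this 7x7 grid, from start (0, 0) to goal (6, 6):
from math import sqrt

dx, dy = 6 - 0, 6 - 0
manhattan = abs(dx) + abs(dy)    # 12: a tight lower bound on a 4-connected grid
euclidean = sqrt(dx**2 + dy**2)  # ~8.485: admissible too, but a looser bound here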
| 363
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 )
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ):
lowerCAmelCase = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ):
lowerCAmelCase = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase = int(parent_a[1] * 100 ) + 1
lowerCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0]
lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones in the genes list.
lowerCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithm is doing.
lowerCAmelCase ,lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
lowerCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
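# Single-point crossover, as used above, splices two parents at one random cut.
# A deterministic mini-example with the cut fixed by hand (the function above
# draws it with random.randint):
parent_a, parent_b = 'AAAAAA', 'BBBBBB'
cut = 2
child_a = parent_a[:cut] + parent_b[cut:]
child_b = parent_b[:cut] + parent_a[cut:]
assert (child_a, child_b) == ('AABBBB', 'BBAAAA')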
| 309
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__UpperCamelCase : Tuple = trt.Logger(trt.Logger.WARNING)
__UpperCamelCase : Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__UpperCamelCase : Any = logging.getLogger(__name__)
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to the ONNX model.''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__UpperCamelCase : Optional[Any] = args.per_device_eval_batch_size
__UpperCamelCase : Union[str, Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__UpperCamelCase : List[str] = True
__UpperCamelCase : int = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__UpperCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__UpperCamelCase : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__UpperCamelCase : Tuple = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__UpperCamelCase : Optional[int] = [network.get_input(i) for i in range(network.num_inputs)]
__UpperCamelCase : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__UpperCamelCase : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__UpperCamelCase : Tuple = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__UpperCamelCase : Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase = np.asarray(inputs['input_ids'] , dtype=np.intaa )
lowerCAmelCase = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
lowerCAmelCase = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _UpperCAmelCase )
# start time
lowerCAmelCase = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase = time.time()
lowerCAmelCase = end_time - start_time
lowerCAmelCase = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__UpperCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Set up logging: we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCamelCase : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__UpperCamelCase : List[str] = raw_datasets['''validation'''].column_names
__UpperCamelCase : Any = '''question''' if '''question''' in column_names else column_names[0]
__UpperCamelCase : Optional[int] = '''context''' if '''context''' in column_names else column_names[1]
__UpperCamelCase : Optional[int] = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__UpperCamelCase : List[Any] = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__UpperCamelCase : Optional[int] = min(args.max_seq_length, tokenizer.model_max_length)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
lowerCAmelCase = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
lowerCAmelCase = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=_UpperCAmelCase , stride=args.doc_stride , return_overflowing_tokens=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__UpperCamelCase : Any = raw_datasets['''validation''']
# Validation Feature Creation
__UpperCamelCase : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__UpperCamelCase : Any = default_data_collator
__UpperCamelCase : Tuple = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__UpperCamelCase : Optional[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCAmelCase = postprocess_qa_predictions(
examples=_UpperCAmelCase , features=_UpperCAmelCase , predictions=_UpperCAmelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_UpperCAmelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase , label_ids=_UpperCAmelCase )
__UpperCamelCase : int = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ):
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__UpperCamelCase : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__UpperCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
__UpperCamelCase : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
__UpperCamelCase : List[str] = cuda.mem_alloc(h_output0.nbytes)
__UpperCamelCase : int = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__UpperCamelCase : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__UpperCamelCase : Tuple = 0.0
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : Tuple = timeit.default_timer()
__UpperCamelCase : Tuple = None
for step, batch in enumerate(eval_dataloader):
__UpperCamelCase : List[str] = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
__UpperCamelCase : str = outputs
__UpperCamelCase : List[Any] = torch.tensor(start_logits)
__UpperCamelCase : Optional[int] = torch.tensor(end_logits)
# necessary to pad predictions and labels so they can be gathered
__UpperCamelCase : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__UpperCamelCase : Any = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__UpperCamelCase : Optional[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__UpperCamelCase : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__UpperCamelCase : List[Any] = nested_truncate(all_preds, len(eval_dataset))
__UpperCamelCase : List[str] = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inferences = %d''', niter)
__UpperCamelCase : List[str] = post_processing_function(eval_examples, eval_dataset, all_preds)
__UpperCamelCase : Union[str, Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
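# NOTE: the `model_infer` helper called in the evaluation loop above is defined elsewhere
# in the original script. What follows is a minimal, hypothetical sketch of such a helper,
# assuming pycuda (with `pycuda.autoinit` run once at startup) and a TensorRT execution
# context with the input bindings followed by two output bindings (start/end logits).
# The buffer names mirror the allocations above but are illustrative assumptions, not the
# exact names used upstream.
import timeit
import numpy as np
import pycuda.driver as cuda
def model_infer_sketch(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    # Copy each input feature host -> device asynchronously on the stream
    # (assuming the usual BERT-style inputs, in binding-index order).
    features = [np.asarray(batch[name], dtype=np.int32).ravel()
                for name in ("input_ids", "attention_mask", "token_type_ids")]
    for d_input, feature in zip(d_inputs, features):
        cuda.memcpy_htod_async(d_input, feature, stream)
    # Enqueue inference; bindings are the device pointers in binding-index order.
    start = timeit.default_timer()
    context.execute_async_v2(
        bindings=[int(d) for d in d_inputs] + [int(d_output0), int(d_output1)],
        stream_handle=stream.handle,
    )
    # Copy start/end logits device -> host and wait for the stream to drain;
    # the measured time therefore covers execution plus the output copies.
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    stream.synchronize()
    infer_time = timeit.default_timer() - start
    return (h_output0, h_output1), infer_time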
| 364
|
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = ''
lowerCAmelCase = ''
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 2_56
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = cva.imread(_snake_case , 0 )
lowerCAmelCase = copy.deepcopy(self.img )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
lowerCAmelCase = np.sum(_snake_case )
for i in range(len(_snake_case ) ):
lowerCAmelCase = x[i] / self.k
self.sk += prk
lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase = int(last % last )
lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_snake_case )
lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ ( self ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
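# The class above is, in effect, global histogram equalization written out by hand:
# build the intensity histogram, accumulate it into a CDF, scale by L - 1, round,
# and remap every pixel through the resulting lookup table. A minimal NumPy-only
# sketch of the same mapping, assuming an 8-bit grayscale image array:
import numpy as np
def equalize_histogram_sketch(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    pdf = hist / img.size                              # normalized histogram p(k)
    cdf = np.cumsum(pdf)                               # running sum s_k
    mapping = np.round((levels - 1) * cdf).astype(np.uint8)
    return mapping[img]                                # per-pixel lookup
# usage: equalized = equalize_histogram_sketch(gray_image)  # gray_image: np.uint8 array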
| 309
| 0
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files.
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 365
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files.
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a ( a__ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
snake_case__ = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(_snake_case ) )
lowerCAmelCase = torch.manual_seed(_snake_case )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_snake_case ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_snake_case ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_snake_case ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_snake_case ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_snake_case ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ort.SessionOptions()
lowerCAmelCase = False
return options
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCAmelCase = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = 'A fantasy landscape, trending on artstation'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=_snake_case , image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='np' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCAmelCase = init_image.resize((1_28, 1_28) )
lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = 'A fantasy landscape, trending on artstation'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=_snake_case , image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='np' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
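# A minimal usage sketch of the pipeline exercised above: load the ONNX x4 upscaler on
# CPU and turn a 128x128 input into a 512x512 image. The checkpoint and image URL are
# the ones used by the tests; `num_inference_steps` is kept low purely for speed.
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils import load_image
pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((128, 128))
result = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=low_res,
    num_inference_steps=10,
    output_type="np",
)
assert result.images[0].shape == (512, 512, 3)  # upscaled 4x in each dimension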
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCAmelCase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
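# For reference, the generic shape of the two pytest hooks wired above: a hypothetical
# standalone conftest.py that registers the same flag and reacts to it at the end of a
# run (the real option handling lives in diffusers.utils.testing_utils).
def pytest_addoption(parser):
    parser.addoption("--make-reports", action="store", default=None, help="generate report files from this run")
def pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        terminalreporter.write_line(f"writing reports with id {make_reports!r}")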
| 309
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
return image
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ):
lowerCAmelCase = dct.pop(_UpperCAmelCase )
lowerCAmelCase = val
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCAmelCase = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
lowerCAmelCase = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
lowerCAmelCase = torch.cat((q_bias, torch.zeros_like(_UpperCAmelCase , requires_grad=_UpperCAmelCase ), v_bias) )
lowerCAmelCase = qkv_bias
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : str ):
lowerCAmelCase = 364 if 'coco' in model_name else 224
lowerCAmelCase = BlipaVisionConfig(image_size=_UpperCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowerCAmelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_UpperCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
lowerCAmelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_UpperCAmelCase ).to_dict()
elif "t5-xl" in model_name:
lowerCAmelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCAmelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
lowerCAmelCase = BlipaConfig(vision_config=_UpperCAmelCase , text_config=_UpperCAmelCase )
return config, image_size
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=False ):
lowerCAmelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
lowerCAmelCase = tokenizer('\n' , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowerCAmelCase ,lowerCAmelCase = get_blipa_config(_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowerCAmelCase = BlipaForConditionalGeneration(_UpperCAmelCase ).eval()
lowerCAmelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
lowerCAmelCase ,lowerCAmelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = load_model_and_preprocess(
name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
lowerCAmelCase = original_model.state_dict()
lowerCAmelCase = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
if key.startswith('Qformer.bert' ):
lowerCAmelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowerCAmelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
lowerCAmelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
lowerCAmelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
lowerCAmelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
lowerCAmelCase = key.replace('t5' , 'language' )
lowerCAmelCase = val
# read in qv biases
read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert len(_UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCAmelCase = load_demo_image()
lowerCAmelCase = vis_processors['eval'](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
lowerCAmelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
# create processor
lowerCAmelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase )
lowerCAmelCase = BlipaProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
lowerCAmelCase = processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
original_model.to(_UpperCAmelCase )
hf_model.to(_UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
lowerCAmelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
lowerCAmelCase = hf_model(_UpperCAmelCase , _UpperCAmelCase ).logits
else:
lowerCAmelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
lowerCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
lowerCAmelCase = hf_model(_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCAmelCase = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCAmelCase = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_UpperCAmelCase )
else:
# cast to same type
lowerCAmelCase = logits.dtype
assert torch.allclose(original_logits.to(_UpperCAmelCase ) , _UpperCAmelCase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowerCAmelCase = ''
lowerCAmelCase = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
lowerCAmelCase = original_model.generate({'image': original_pixel_values} )
lowerCAmelCase = hf_model.generate(
_UpperCAmelCase , _UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _UpperCAmelCase )
lowerCAmelCase = input_ids.shape[1]
lowerCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_UpperCAmelCase )
lowerCAmelCase = [text.strip() for text in output_text]
print('HF generation:' , _UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
__UpperCamelCase : Dict = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
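# Once converted, the checkpoint can be used for captioning with the standard
# transformers classes (the `Blipa*` names in this dump correspond to `Blip2*`).
# A short sketch; the hub id below is an assumption for illustration:
import requests
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
with torch.no_grad():
    inputs = processor(images=image, return_tensors="pt")
    generated_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())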
| 367
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
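# A short usage sketch of the processor under test: resize so the shortest edge is 20px,
# center-crop to 18x18, rescale, and flip the channel order (the `do_flip_channel_order`
# default). The constructor arguments mirror the tester defaults above.
import numpy as np
from transformers import MobileViTImageProcessor
image_processor = MobileViTImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
dummy_image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
pixel_values = image_processor(dummy_image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)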
| 309
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=32 , _snake_case=2 , _snake_case=3 , _snake_case=16 , _snake_case=[1, 2, 1] , _snake_case=[2, 2, 4] , _snake_case=2 , _snake_case=2.0 , _snake_case=True , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case="gelu" , _snake_case=False , _snake_case=True , _snake_case=0.02 , _snake_case=1E-5 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=10 , _snake_case=8 , _snake_case=["stage1", "stage2", "stage3"] , _snake_case=[1, 2, 3] , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = patch_norm
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = is_training
lowerCAmelCase = scope
lowerCAmelCase = use_labels
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = encoder_stride
lowerCAmelCase = out_features
lowerCAmelCase = out_indices
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = MaskFormerSwinModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = MaskFormerSwinBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_snake_case ):
lowerCAmelCase = ['stem']
lowerCAmelCase = MaskFormerSwinBackbone(config=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
snake_case__ = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MaskFormerSwinModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
"""simple docstring"""
return
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_snake_case )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) , _snake_case )
# Swin has a different seq_length
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0  # NaN != NaN, so this zeroes out NaNs in place before comparison
return t
def check_equivalence(_snake_case , _snake_case , _snake_case , _snake_case={} ):
with torch.no_grad():
lowerCAmelCase = model(**_snake_case , return_dict=_snake_case , **_snake_case )
lowerCAmelCase = model(**_snake_case , return_dict=_snake_case , **_snake_case ).to_tuple()
def recursive_check(_snake_case , _snake_case ):
if isinstance(_snake_case , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_snake_case , _snake_case ):
recursive_check(_snake_case , _snake_case )
elif isinstance(_snake_case , _snake_case ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_snake_case , _snake_case )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_snake_case ) , set_nan_tensor_to_zero(_snake_case ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
F' {torch.isnan(_snake_case ).any()} and `inf`: {torch.isinf(_snake_case )}. Dict has'
F' `nan`: {torch.isnan(_snake_case ).any()} and `inf`: {torch.isinf(_snake_case )}.'
) , )
recursive_check(_snake_case , _snake_case )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
check_equivalence(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
check_equivalence(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
check_equivalence(_snake_case , _snake_case , _snake_case , {'output_hidden_states': True} )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
check_equivalence(_snake_case , _snake_case , _snake_case , {'output_hidden_states': True} )
@require_torch
class a ( unittest.TestCase , a__ ):
snake_case__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
snake_case__ = MaskFormerSwinConfig
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MaskFormerSwinModelTester(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase = backbone_class(_snake_case )
backbone.to(_snake_case )
backbone.eval()
lowerCAmelCase = backbone(**_snake_case )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _snake_case )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase = backbone(**_snake_case , output_hidden_states=_snake_case )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase = backbone(**_snake_case , output_attentions=_snake_case )
self.assertIsNotNone(outputs.attentions )
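# A minimal sketch of the backbone usage verified above: build a tiny config (dimensions
# mirror the model tester defaults), run a random image through the backbone, and inspect
# the per-stage feature maps, whose channel count doubles at each stage (16, 32, 64).
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig
config = MaskFormerSwinConfig(
    image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4],
    out_features=["stage1", "stage2", "stage3"],
)
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.rand(1, 3, 32, 32))
for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
    print(name, tuple(feature_map.shape))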
| 368
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
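# debug_launcher runs a function under a simulated distributed setup on CPU processes,
# which is what both tests above rely on. A minimal standalone sketch:
from accelerate import Accelerator, debug_launcher
def main():
    accelerator = Accelerator()
    print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")
debug_launcher(main)  # defaults to 2 CPU processes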
| 309
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_UpperCAmelCase )
# Calculate the inverse of the matrix
return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
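# A quick numerical check of the routine above (renamed `_SCREAMING_SNAKE_CASE` in this
# dump): multiplying a matrix by its computed inverse should give the identity matrix,
# up to floating-point error.
from numpy import allclose, array, eye
m = [[2.0, 5.0, 7.0], [6.0, 3.0, 4.0], [5.0, -2.0, -3.0]]
assert allclose(array(m) @ array(_SCREAMING_SNAKE_CASE(m)), eye(3), atol=1e-6)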
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
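# The _LazyModule above defers importing the heavy torch-backed submodules until one of
# their attributes is first accessed. A stripped-down sketch of the same idea using the
# PEP 562 module-level __getattr__ (hypothetical, not the actual transformers helper):
import importlib
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}
def __getattr__(name):  # only called when `name` is not found the normal way
    if name in _attr_to_module:
        submodule = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")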
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__UpperCamelCase : List[Any] = False
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = 12
lowerCAmelCase = 12
lowerCAmelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        lowerCAmelCase = Transformer2DModel(**_snake_case )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.dummy_vqvae
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = self.dummy_tokenizer
lowerCAmelCase = self.dummy_transformer
lowerCAmelCase = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=_snake_case )
lowerCAmelCase = VQDiffusionPipeline(
vqvae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , transformer=_snake_case , scheduler=_snake_case , learned_classifier_free_sampling_embeddings=_snake_case , )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = 'teddy bear playing in the pool'
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(0 )
lowerCAmelCase = pipe([prompt] , generator=_snake_case , num_inference_steps=2 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(0 )
lowerCAmelCase = pipe(
[prompt] , generator=_snake_case , output_type='np' , return_dict=_snake_case , num_inference_steps=2 )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowerCAmelCase = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.dummy_vqvae
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = self.dummy_tokenizer
lowerCAmelCase = self.dummy_transformer
lowerCAmelCase = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_snake_case , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowerCAmelCase = VQDiffusionPipeline(
vqvae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , transformer=_snake_case , scheduler=_snake_case , learned_classifier_free_sampling_embeddings=_snake_case , )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = 'teddy bear playing in the pool'
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(0 )
lowerCAmelCase = pipe([prompt] , generator=_snake_case , num_inference_steps=2 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(0 )
lowerCAmelCase = pipe(
[prompt] , generator=_snake_case , output_type='np' , return_dict=_snake_case , num_inference_steps=2 )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowerCAmelCase = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
lowerCAmelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
lowerCAmelCase = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(0 )
lowerCAmelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=_snake_case , output_type='np' , )
lowerCAmelCase = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
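# For reference, a minimal end-to-end usage sketch of the pipeline exercised by the
# slow test above (hedged: requires a diffusers install and network access; the
# checkpoint name is the one the test itself loads):
def run_vq_diffusion_sketch(prompt='teddy bear playing in the pool'):
    import torch
    from diffusers import VQDiffusionPipeline

    pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
    generator = torch.Generator(device='cpu').manual_seed(0)  # reproducible sampling
    output = pipe(prompt, num_images_per_prompt=1, generator=generator, output_type='np')
    return output.images[0]  # numpy array of shape (256, 256, 3)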
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
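# A short usage sketch for the fast tokenizer defined above (hedged: needs the
# `transformers` package and network access to fetch the checkpoint):
def distilbert_fast_tokenizer_demo():
    from transformers import DistilBertTokenizerFast

    tok = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
    enc = tok('Hello world')
    # input_ids is [CLS] hello world [SEP]; attention_mask marks every id as real.
    return enc['input_ids'], enc['attention_mask']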
| 309
| 0
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
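# A worked example of the mapping used above (A<->T, C<->G): the complement of
# "ATGC" is "TACG". A self-contained restatement with illustrative names:
def dna_complement_sketch(strand: str) -> str:
    if len(re.findall('[ATCG]', strand)) != len(strand):
        raise ValueError('Invalid Strand')
    return strand.translate(str.maketrans('ATCG', 'TAGC'))

assert dna_complement_sketch('ATGC') == 'TACG'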
| 350
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
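# A related sketch: counting the constructions instead of materialising them, with
# memoisation (illustrative names; `functools` is from the standard library).
# Enumerating every combination, as above, can take time proportional to the number
# of combinations, which may be exponential; counting the ways is much cheaper.
from functools import lru_cache

def count_construct(target: str, word_bank: tuple) -> int:
    @lru_cache(maxsize=None)
    def go(suffix: str) -> int:
        if suffix == '':
            return 1  # one way to build the empty string
        return sum(go(suffix[len(word):]) for word in word_bank if suffix.startswith(word))
    return go(target)

# Matches the two combinations printed for the first example above:
assert count_construct('jwajalapa', ('jwa', 'j', 'w', 'a', 'la', 'lapa')) == 2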
| 309
| 0
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCamelCase : Any = '''src/transformers'''
__UpperCamelCase : Tuple = '''docs/source/en/tasks'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.readlines()
# Find the start prompt.
lowerCAmelCase = 0
while not lines[start_index].startswith(_UpperCAmelCase ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
while not lines[end_index].startswith(_UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCamelCase : Tuple = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCamelCase : Any = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCamelCase : str = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
lowerCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCAmelCase , set() )
lowerCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=False ):
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = _find_text_in_file(
filename=os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
lowerCAmelCase = get_model_list_for_task(_UpperCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
' to fix this.' )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCamelCase : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 351
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 0
|
from timeit import timeit
__UpperCamelCase : Optional[Any] = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = len(_UpperCAmelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = len(_UpperCAmelCase ) // 2
lowerCAmelCase = len(_UpperCAmelCase )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(_UpperCAmelCase ) <= 2:
return True
if s[0] == s[len(_UpperCAmelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
return s == s[::-1]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = F'all({name}(key) is value for key, value in test_data.items())'
lowerCAmelCase = F'from __main__ import test_data, {name}'
lowerCAmelCase = 50_0000
lowerCAmelCase = timeit(stmt=_UpperCAmelCase , setup=_UpperCAmelCase , number=_UpperCAmelCase )
print(F'{name:<35} finished {number:,} runs in {result:.5f} seconds' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
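# A quick cross-check that the four implementations above agree on random inputs
# (a lightweight sketch; it assumes the functions are bound to the names used in
# the __main__ block above -- call fuzz_palindrome_impls() to run it):
import random

def fuzz_palindrome_impls(trials=1000):
    for _ in range(trials):
        s = ''.join(random.choices('abc', k=random.randint(0, 8)))
        expected = s == s[::-1]
        assert is_palindrome(s) is expected
        assert is_palindrome_traversal(s) is expected
        assert is_palindrome_recursive(s) is expected
        assert is_palindrome_slice(s) is expected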
| 352
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
    # Two triangular membership functions on X: "young" peaks at 25, "middle aged" at 50.
    __UpperCamelCase : str = fuzz.membership.trimf(X, [0, 25, 50])
    __UpperCamelCase : Tuple = fuzz.membership.trimf(X, [25, 50, 75])
# Compute the different operations using inbuilt functions.
__UpperCamelCase : Dict = np.ones(75)
__UpperCamelCase : str = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
__UpperCamelCase : Dict = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
__UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
__UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
__UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
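# The skfuzzy helpers used above reduce to elementwise numpy operations on the
# membership arrays. A dependency-light sketch of the first four operations
# (illustrative names; only numpy is required):
def fuzzy_ops_sketch(mu_a, mu_b):
    return {
        'union': np.maximum(mu_a, mu_b),           # max(µA(x), µB(x))
        'intersection': np.minimum(mu_a, mu_b),    # min(µA(x), µB(x))
        'complement_a': 1 - mu_a,                  # 1 - µA(x)
        'difference': np.minimum(mu_a, 1 - mu_b),  # min(µA(x), 1 - µB(x))
    }

ops = fuzzy_ops_sketch(np.array([0.2, 0.8]), np.array([0.5, 0.3]))
assert np.allclose(ops['union'], [0.5, 0.8])
assert np.allclose(ops['difference'], [0.2, 0.7])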
| 309
| 0
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = re.compile(
R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
return bool(re.search(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
__UpperCamelCase : str = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
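# Worked examples against a self-contained restatement of the pattern above: a valid
# number starts with 0 / 94 / +94 / 0094, continues with 7 followed by one of
# 0,1,2,4,5,6,7,8, then an optional "-" or " ", and ends with seven digits.
SRI_LANKAN_PHONE_RE = re.compile(r'^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$')

assert SRI_LANKAN_PHONE_RE.search('0094702343221') is not None
assert SRI_LANKAN_PHONE_RE.search('+94717575555') is not None
assert SRI_LANKAN_PHONE_RE.search('0112343221') is None  # landline-style prefix, not 7x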
| 353
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ):
lowerCAmelCase = int(_UpperCAmelCase )
# Initialize Result
lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_UpperCAmelCase ):
# Find denominations
while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ):
total_value -= int(_UpperCAmelCase )
answer.append(_UpperCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__UpperCamelCase : Any = []
__UpperCamelCase : List[Any] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
__UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(f'''Following is minimal change for {value}: ''')
__UpperCamelCase : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
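# Note: the greedy strategy above is optimal for canonical coin systems such as the
# Indian denominations listed, but not for every denomination set. A quick
# illustration with a hypothetical non-canonical system:
def greedy_change_sketch(denominations, value):
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while value >= denomination:
            value -= denomination
            answer.append(denomination)
    return answer

# Greedy spends six coins on 30 (25 + five 1s), while 10 + 10 + 10 uses only three.
assert greedy_change_sketch([1, 10, 25], 30) == [25, 1, 1, 1, 1, 1]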
| 309
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ):
# Initialise PyTorch model
lowerCAmelCase = BertConfig.from_json_file(_UpperCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
lowerCAmelCase = BertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
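# Example invocation of the conversion script above (hedged: the script file name and
# all paths are placeholders for illustration):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin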
| 354
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        (
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
        ) = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        (
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
        ) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
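# A minimal standalone inference sketch matching the first integration test above
# (hedged: requires TensorFlow, the transformers package and network access):
def esm_masked_lm_sketch():
    import tensorflow as tf
    from transformers import TFEsmForMaskedLM

    model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    logits = model(input_ids)[0]
    return logits.shape  # (1, 6, 33): batch size, sequence length, vocab size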
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
__UpperCamelCase : Union[str, Any] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__UpperCamelCase : List[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = []
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCAmelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_UpperCAmelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_UpperCAmelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_UpperCAmelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCAmelCase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_UpperCAmelCase )
return next_generation
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] , _UpperCAmelCase : int ):
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
# Create output image
lowerCAmelCase = Image.new('RGB' , (len(cells[0] ), len(_UpperCAmelCase )) )
lowerCAmelCase = img.load()
# Save cells to image
for x in range(len(_UpperCAmelCase ) ):
for y in range(len(cells[0] ) ):
lowerCAmelCase = 255 - cells[y][x] * 255
lowerCAmelCase = (colour, colour, colour)
# Save image
images.append(_UpperCAmelCase )
lowerCAmelCase = new_generation(_UpperCAmelCase )
return images
if __name__ == "__main__":
__UpperCamelCase : List[Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
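# The eight explicit bounds checks in the generation step above can be expressed more
# compactly with a loop over neighbour offsets; a behaviour-equivalent sketch
# (illustrative names):
def count_live_neighbours(cells, i, j):
    count = 0
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue  # skip the cell itself
            ni, nj = i + di, j + dj
            if 0 <= ni < len(cells) and 0 <= nj < len(cells[ni]):
                count += cells[ni][nj]
    return count

# The centre of the blinker defined above has exactly two live neighbours:
assert count_live_neighbours([[0, 1, 0], [0, 1, 0], [0, 1, 0]], 1, 1) == 2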
| 355
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
            lowerCAmelCase = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
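# A brief usage sketch for the tokenizer implemented above (hedged: needs the
# `sentencepiece` package and network access; the checkpoint name is taken from the
# pretrained maps at the top of this file):
def ernie_m_tokenizer_demo():
    from transformers import ErnieMTokenizer

    tok = ErnieMTokenizer.from_pretrained('susnato/ernie-m-base_pytorch')
    ids = tok('Hello world')['input_ids']  # [CLS] ... [SEP], per the methods above
    return ids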
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Dict = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 356
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 309
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''pixel_values''']
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PIL.Image.BICUBIC , _snake_case = True , _snake_case = None , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = size if size is not None else {'height': 2_56, 'width': 2_56}
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_snake_case , param_name='crop_size' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PIL.Image.BICUBIC , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
_snake_case , size=(size['height'], size['width']) , resample=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_snake_case , size=(size['height'], size['width']) , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case=None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(_snake_case , param_name='crop_size' )
lowerCAmelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
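# Hedged usage sketch for the pipeline above. The enclosing class is defined
# earlier in the file; `MyImageProcessor` is a hypothetical stand-in for its
# real name, and the example assumes Pillow is installed:
#
#     from PIL import Image
#     processor = MyImageProcessor()
#     batch = processor.preprocess(Image.new("RGB", (256, 256)), return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, num_channels, crop_height, crop_width)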
| 357
|
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
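# Hedged usage sketch for the routines above (function names as reconstructed):
if __name__ == "__main__":
    # test_graph_1 has SCCs {0, 1, 2}, {3} and {4}; test_graph_2 has {0, 1, 2} and {3, 4, 5}.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))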
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    """Return the denominator of the product of the digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
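# For digit_len == 2 the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() == 100.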
| 358
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
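# Hedged usage sketch (the checkpoint and data_dir are illustrative
# placeholders, and the GLUE MRPC .tsv files are assumed to exist locally):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
#     print(len(train_dataset))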
| 309
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 359
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune the walk in place: skip the scripts dir and hidden/private dirs.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
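# Illustrative output for a tree containing sorting/quick_sort.py:
#
#   ## Sorting
#     * [Quick Sort](sorting/quick_sort.py)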
| 309
| 0
|
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND: output is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 360
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            any(label.name.lower() == "stale" for label in issue.get_labels())
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 309
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma**2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
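# Sanity check: gaussian(0) with the default mu=0, sigma=1 is 1/sqrt(2*pi),
# approximately 0.3989422804014327.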
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Any = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = ['''ChineseCLIPFeatureExtractor''']
__UpperCamelCase : List[str] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 362
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
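# Minimal sanity check for the deprecation shim above: constructing the class
# should emit a FutureWarning while otherwise behaving like PoolFormerImageProcessor.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         PoolFormerFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)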
| 309
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 363
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by the number of positions where it matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
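# For example, if the random slice index happens to be 2, crossing "aaaaaa"
# with "zzzzzz" yields the children ("aazzzz", "zzaaaa").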
def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed new children from a parent, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution loop until the population contains the target string."""
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]
        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 309
| 0
|
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
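# Worked example: binary_or(25, 32) -> 25 = 0b011001 and 32 = 0b100000 once
# zero-padded, so the OR is "0b111001" (decimal 57).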
| 364
|
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__UpperCamelCase : List[str] = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__UpperCamelCase : Optional[int] = json.load(f)
@require_torch
class FSMTModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 365
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
| 0
|
"""simple docstring"""
__UpperCamelCase : List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__UpperCamelCase : Dict = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__UpperCamelCase : Union[str, Any] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a Gregorian date, using the Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
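# Worked example: get_week_day(2022, 1, 1). century = 20 gives anchor 2;
# centurian = 22 gives dooms_day = (1 + 10 + 2 + 2) % 7 = 1; 2022 is not a
# leap year, so day_anchor = 3 and week_day = (1 + 1 - 3) % 7 = 6 -> "Saturday".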
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 309
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    # Every supported weight type (weight, weight_g/weight_v, the LSTM parameter
    # slots, bias, running_mean/running_var, num_batches_tracked) is an attribute
    # on the target module, so one getattr-based assignment handles them all.
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
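# Hedged CLI sketch for the converter above (the script filename and all
# paths are illustrative placeholders):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf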
| 367
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 309
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size=65536,
        sample_rate=None,
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = True , ):
"""simple docstring"""
lowerCAmelCase = timestep
        if not torch.is_tensor(timesteps ):
            lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            lowerCAmelCase = timesteps[None].to(sample.device )
        lowerCAmelCase = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            lowerCAmelCase = self.time_mlp(timestep_embed )
else:
lowerCAmelCase = timestep_embed[..., None]
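            # expand the embedding along the batch and length axes so it matches the sample's shape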
lowerCAmelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCAmelCase = ()
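        # accumulate the residuals produced by each down block; the up path pops them as skip connections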
for downsample_block in self.down_blocks:
lowerCAmelCase ,lowerCAmelCase = downsample_block(hidden_states=_snake_case , temb=_snake_case )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase = self.mid_block(_snake_case , _snake_case )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCAmelCase = down_block_res_samples[-1:]
lowerCAmelCase = down_block_res_samples[:-1]
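            # consume the stored residuals in reverse order (last in, first out) for this up block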
lowerCAmelCase = upsample_block(_snake_case , res_hidden_states_tuple=_snake_case , temb=_snake_case )
# 5. post-process
if self.out_block:
lowerCAmelCase = self.out_block(_snake_case , _snake_case )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_snake_case )
| 368
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 309
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileBertTokenizer
snake_case__ = MobileBertTokenizerFast
snake_case__ = True
snake_case__ = True
snake_case__ = filter_non_english
snake_case__ = '''google/mobilebert-uncased'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCAmelCase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = 'UNwant\u00E9d,running'
lowerCAmelCase = 'unwanted, running'
return input_text, output_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_snake_case , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = 'UNwant\u00E9d,running'
lowerCAmelCase = tokenizer.tokenize(_snake_case )
lowerCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(_snake_case )
lowerCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# With lower casing
lowerCAmelCase = self.get_tokenizer(do_lower_case=_snake_case )
lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=_snake_case )
lowerCAmelCase = 'UNwant\u00E9d,running'
lowerCAmelCase = tokenizer.tokenize(_snake_case )
lowerCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(_snake_case )
lowerCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase = {}
for i, token in enumerate(_snake_case ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=_snake_case , unk_token='[UNK]' )
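        # wordpiece greedily matches the longest vocabulary prefix; a word with no full match becomes the unk token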
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
lowerCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
lowerCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCAmelCase = tokenizer_r.encode_plus(
_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case , )
                lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
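                # expected (offset, token) pairs; in the lowercased case the accented subwords collapse into a single 'naive' token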
lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['的', '人', '有']
lowerCAmelCase = ''.join(_snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase = False
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCAmelCase = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(_snake_case )
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
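        # each cofactor below is the signed determinant of the 2x2 minor left after
        # removing the element's row and column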
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
        lowerCAmelCase = array(cofactor_matrix )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 309
| 0
|
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : Tuple ):
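    # an exclusive lock on this file serializes prints from concurrent processes
    # so their output does not interleave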
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_UpperCAmelCase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
__UpperCamelCase : Tuple = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__UpperCamelCase : Dict = torch.device('''cuda''', local_rank)
__UpperCamelCase : List[str] = socket.gethostname()
__UpperCamelCase : Optional[Any] = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__UpperCamelCase : Any = dist.get_rank()
__UpperCamelCase : List[Any] = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
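        # keep the backend normalizer consistent with the casing / accent-stripping options requested here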
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 309
| 0
|
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , **__a : Any ) -> Any:
requires_backends(self , ["bs4"] )
super().__init__(**__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : int = []
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Any = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_UpperCamelCase : Any = parent.find_all(child.name , recursive=__a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__a ) else next(i for i, s in enumerate(__a , 1 ) if s is child ) )
_UpperCamelCase : List[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] ) -> List[Any]:
_UpperCamelCase : Any = BeautifulSoup(__a , "html.parser" )
_UpperCamelCase : Dict = []
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[str] = []
for element in html_code.descendants:
if type(__a ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_UpperCamelCase : List[str] = html.unescape(__a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.xpath_soup(__a )
stringaxtag_seq.append(__a )
stringaxsubs_seq.append(__a )
if len(__a ) != len(__a ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(__a ) != len(__a ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Tuple , __a : int ) -> List[Any]:
_UpperCamelCase : int = ""
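        # builds an XPath such as /html/body/div[2]; a subscript is appended only when
        # the tag is not unique among its siblings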
for tagname, subs in zip(__a , __a ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self : List[str] , __a : str ) -> BatchFeature:
_UpperCamelCase : str = False
# Check that strings has a valid type
if isinstance(__a , __a ):
_UpperCamelCase : int = True
elif isinstance(__a , (list, tuple) ):
if len(__a ) == 0 or isinstance(html_strings[0] , __a ):
_UpperCamelCase : List[str] = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F'''but is of type {type(__a )}.''' )
_UpperCamelCase : Optional[Any] = bool(isinstance(__a , (list, tuple) ) and (isinstance(html_strings[0] , __a )) )
if not is_batched:
_UpperCamelCase : str = [html_strings]
# Get nodes + xpaths
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Any = []
for html_string in html_strings:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = self.get_three_from_single(__a )
nodes.append(__a )
_UpperCamelCase : Any = []
for node, tag_list, sub_list in zip(__a , __a , __a ):
_UpperCamelCase : Dict = self.construct_xpath(__a , __a )
xpath_strings.append(__a )
xpaths.append(__a )
# return as Dict
_UpperCamelCase : Optional[Any] = {"nodes": nodes, "xpaths": xpaths}
_UpperCamelCase : Any = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
| 310
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(lowercase_ ):
print(F'''{i}\t\t{d}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
for j in range(lowercase_ ):
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Tuple = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> list[float]:
"""simple docstring"""
_UpperCamelCase : List[Any] = [float("inf" )] * vertex_count
_UpperCamelCase : Any = 0.0
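    # classic Bellman-Ford: the source starts at distance 0 and every edge is relaxed
    # |V| - 1 times; one further pass that still improves a distance signals a negative cycle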
for _ in range(vertex_count - 1 ):
for j in range(lowercase_ ):
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
_UpperCamelCase : List[str] = distance[u] + w
_UpperCamelCase : Any = check_negative_cycle(lowercase_ ,lowercase_ ,lowercase_ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = int(input("Enter number of vertices: ").strip())
lowerCamelCase__ = int(input("Enter number of edges: ").strip())
lowerCamelCase__ = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
lowerCamelCase__ = {"src": src, "dst": dest, "weight": weight}
lowerCamelCase__ = int(input("\nEnter shortest path source:").strip())
lowerCamelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 310
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
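        # generation runs in a background thread; the streamer yields decoded text chunks as tokens are produced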
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 310
| 1
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase__ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __a : str , __a : List[Any]=16 , __a : Optional[Any]=13 , __a : Union[str, Any]=7 , __a : Optional[int]=14 , __a : int=10 , __a : Optional[int]=19 , __a : str=5 , __a : Dict=4 , __a : str=True , __a : List[str]=16 , __a : List[Any]=2 , __a : Dict=4 , __a : Optional[int]=4 , __a : Tuple="gelu" , __a : Any=0.1 , __a : Tuple=0.1 , __a : List[Any]=[1, 2, 3, 4, 5] , __a : int=25 , __a : List[Any]=5 , ) -> Any:
_UpperCamelCase : str = d_model
_UpperCamelCase : Dict = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Optional[int] = prediction_length
_UpperCamelCase : Any = context_length
_UpperCamelCase : Any = cardinality
_UpperCamelCase : List[Any] = num_time_features
_UpperCamelCase : List[str] = lags_sequence
_UpperCamelCase : Union[str, Any] = embedding_dimension
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : int = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = context_length
_UpperCamelCase : Any = prediction_length + label_length
_UpperCamelCase : Tuple = label_length
_UpperCamelCase : Any = moving_average
_UpperCamelCase : Union[str, Any] = autocorrelation_factor
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Dict ) -> Tuple:
_UpperCamelCase : Any = config.context_length + max(config.lags_sequence )
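        # past tensors must span the context window plus the largest lag used to build lagged features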
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCamelCase : Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCamelCase : Any = floats_tensor([self.batch_size, _past_length] )
_UpperCamelCase : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCamelCase : int = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCamelCase : str = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : List[str] = self.get_config()
_UpperCamelCase : int = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
_UpperCamelCase, _UpperCamelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict , __a : Dict ) -> Optional[Any]:
_UpperCamelCase : Any = AutoformerModel(config=__a ).to(__a ).eval()
_UpperCamelCase : List[Any] = model(**__a )
_UpperCamelCase : Tuple = outputs.encoder_last_hidden_state
_UpperCamelCase : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(__a )
_UpperCamelCase : int = AutoformerEncoder.from_pretrained(__a ).to(__a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = model.create_network_inputs(**__a )
_UpperCamelCase, _UpperCamelCase : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
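        # the series decomposition splits the context window into a seasonal and a trend component before encoding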
_UpperCamelCase : List[str] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCamelCase : Tuple = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCamelCase : Optional[int] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCamelCase : Dict = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCamelCase : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCamelCase : List[str] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
_UpperCamelCase : str = AutoformerDecoder.from_pretrained(__a ).to(__a )
_UpperCamelCase : Tuple = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :str = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ :Any = False
SCREAMING_SNAKE_CASE__ :Any = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :List[str] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase : Optional[Any] = AutoformerModelTester(self )
_UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a , has_text_modality=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase, _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCamelCase : List[str] = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
_UpperCamelCase, _UpperCamelCase : int = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["missing_keys"] , [] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="Model has no tokens embeddings" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
_UpperCamelCase : Any = inspect.signature(getattr(__a , "forward" ) )
# The main input is the name of the argument after `self`
_UpperCamelCase : List[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase, _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Any = [*signature.parameters.keys()]
_UpperCamelCase : int = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = getattr(self.model_tester , "seq_length" , __a )
_UpperCamelCase : List[str] = getattr(self.model_tester , "decoder_seq_length" , __a )
_UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , __a )
_UpperCamelCase : str = getattr(self.model_tester , "d_model" , __a )
_UpperCamelCase : List[str] = getattr(self.model_tester , "num_attention_heads" , __a )
_UpperCamelCase : Any = d_model // num_attention_heads
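        # `dim` is the per-head hidden size; the attention maps below are checked against
        # the shape [num_attention_heads, seq_len, dim]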
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Any = True
_UpperCamelCase : Dict = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : Dict = True
_UpperCamelCase : Any = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCamelCase : Optional[Any] = len(__a )
_UpperCamelCase : List[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
_UpperCamelCase : str = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCamelCase : Any = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Any = True
_UpperCamelCase : Union[str, Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
_UpperCamelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_retain_grad_hidden_states_attentions()
def lowercase__ ( lowercase_="train-batch.pt" ) -> str:
"""simple docstring"""
_UpperCamelCase : Any = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=lowercase_ ,repo_type="dataset" )
_UpperCamelCase : Tuple = torch.load(lowercase_ ,map_location=lowercase_ )
return batch
@require_torch
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
_UpperCamelCase : List[str] = prepare_batch()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCamelCase : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : List[str] = torch.tensor(
[[0.35_93, -1.33_98, 0.63_30], [0.22_79, 1.53_96, -0.17_92], [0.04_50, 1.32_25, -0.23_35]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
_UpperCamelCase : int = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
_UpperCamelCase : int = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCamelCase : Tuple = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCamelCase : Optional[Any] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : Union[str, Any] = torch.tensor(
[[-0.07_34, -0.90_36, 0.83_58], [4.71_86, 2.41_13, 1.95_81], [1.79_53, 2.35_58, 1.29_70]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
_UpperCamelCase : Tuple = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCamelCase : List[Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCamelCase : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
_UpperCamelCase : Optional[Any] = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=__a )
_UpperCamelCase : Tuple = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1e-1 ) )
| 310
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
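    # The decoder weights popped below are tied to the input embeddings; `model.tie_weights()`
    # re-creates them afterwards, which is why they are expected among the missing keys.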
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
    _UpperCamelCase : Tuple = [json.loads(line ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 1
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
    return int(lowercase_ / 2**20 )
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
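    # Context manager that tracks GPU memory while the wrapped block runs: `used` is the
    # allocated delta between exit and enter, `peaked` the peak-over-baseline delta (both in MiB).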
def __enter__( self : Dict ) -> Optional[int]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCamelCase : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *__a : List[str] ) -> Dict:
gc.collect()
torch.cuda.empty_cache()
_UpperCamelCase : List[str] = torch.cuda.memory_allocated()
_UpperCamelCase : int = torch.cuda.max_memory_allocated()
_UpperCamelCase : Optional[Any] = bamb(self.end - self.begin )
_UpperCamelCase : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowercase__ ( lowercase_ ,lowercase_ = 16 ,lowercase_ = "bert-base-cased" ,lowercase_ = 320 ,lowercase_ = 160 ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : int = AutoTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : Optional[Any] = load_dataset(
"glue" ,"mrpc" ,split={"train": F'''train[:{n_train}]''', "validation": F'''validation[:{n_val}]'''} )
def tokenize_function(lowercase_ ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase : Optional[Any] = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=lowercase_ ,max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCamelCase : Tuple = datasets.map(
lowercase_ ,batched=lowercase_ ,remove_columns=["idx", "sentence1", "sentence2"] ,load_from_cache_file=lowercase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(lowercase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase_ ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return tokenizer.pad(lowercase_ ,padding="longest" ,return_tensors="pt" )
# Instantiate dataloaders.
_UpperCamelCase : List[Any] = DataLoader(
tokenized_datasets["train"] ,shuffle=lowercase_ ,collate_fn=lowercase_ ,batch_size=lowercase_ )
_UpperCamelCase : Any = DataLoader(
tokenized_datasets["validation"] ,shuffle=lowercase_ ,collate_fn=lowercase_ ,batch_size=lowercase_ )
return train_dataloader, eval_dataloader
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Dict = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase : Dict = config["lr"]
_UpperCamelCase : List[Any] = int(config["num_epochs"] )
_UpperCamelCase : int = int(config["seed"] )
_UpperCamelCase : List[str] = int(config["batch_size"] )
_UpperCamelCase : Optional[int] = args.model_name_or_path
set_seed(lowercase_ )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = get_dataloaders(lowercase_ ,lowercase_ ,lowercase_ ,args.n_train ,args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase : str = AutoModelForSequenceClassification.from_pretrained(lowercase_ ,return_dict=lowercase_ )
# Instantiate optimizer
_UpperCamelCase : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
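    # DummyOptim / DummyScheduler act as placeholders when the DeepSpeed config file
    # already provides its own optimizer / scheduler section.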
_UpperCamelCase : Optional[Any] = optimizer_cls(params=model.parameters() ,lr=lowercase_ )
if accelerator.state.deepspeed_plugin is not None:
_UpperCamelCase : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCamelCase : str = 1
_UpperCamelCase : Dict = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCamelCase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase_ ,num_warmup_steps=0 ,num_training_steps=lowercase_ ,)
else:
_UpperCamelCase : Any = DummyScheduler(lowercase_ ,total_num_steps=lowercase_ ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Tuple = accelerator.prepare(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
_UpperCamelCase : Any = 0
# Now we train the model
_UpperCamelCase : Tuple = {}
for epoch in range(lowercase_ ,lowercase_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowercase_ ):
_UpperCamelCase : List[Any] = model(**lowercase_ )
_UpperCamelCase : List[str] = outputs.loss
_UpperCamelCase : Dict = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCamelCase : Dict = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"peak_memory_utilization.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" ,type=lowercase_ ,default="bert-base-cased" ,help="Path to pretrained model or model identifier from huggingface.co/models." ,required=lowercase_ ,)
parser.add_argument(
"--output_dir" ,type=lowercase_ ,default="." ,help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." ,)
parser.add_argument(
"--peak_memory_upper_bound" ,type=lowercase_ ,default=lowercase_ ,help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." ,)
parser.add_argument(
"--n_train" ,type=lowercase_ ,default=320 ,help="Number of training examples to use." ,)
parser.add_argument(
"--n_val" ,type=lowercase_ ,default=160 ,help="Number of validation examples to use." ,)
parser.add_argument(
"--num_epochs" ,type=lowercase_ ,default=1 ,help="Number of train epochs." ,)
_UpperCamelCase : int = parser.parse_args()
_UpperCamelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(lowercase_ ,lowercase_ )
if __name__ == "__main__":
main()
| 310
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
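        # Thin wrapper around NLTK's corpus-level GLEU; `min_len`/`max_len` bound the n-gram orders counted.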
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
| 310
| 1
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ = logging.getLogger()
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCamelCase : Dict = parser.parse_args()
return args.f
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
_UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[Any] ) -> Tuple:
_UpperCamelCase : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(__a , "argv" , __a ):
_UpperCamelCase : str = run_glue_deebert.main()
for value in result.values():
                self.assertGreaterEqual(value , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
_UpperCamelCase : int = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(__a )
_UpperCamelCase : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(__a )
_UpperCamelCase : List[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(__a )
| 310
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Tuple = 0
_UpperCamelCase : str = knapsack(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,index + 1 )
if weights[index] <= max_weight:
_UpperCamelCase : str = values[index] + knapsack(
lowercase_ ,lowercase_ ,lowercase_ ,max_weight - weights[index] ,index + 1 )
return max(lowercase_ ,lowercase_ )
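# Example (values chosen for illustration): knapsack over values [10, 20], weights [1, 2],
# number_of_items 2, max_weight 3 and index 0 takes both items and returns 30.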
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
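        # older fsspec versions: drop the cached event-loop/thread references and create a fresh lock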
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
| 310
| 1
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCamelCase__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "summarization"
SCREAMING_SNAKE_CASE__ :str = ["loss"]
SCREAMING_SNAKE_CASE__ :Any = ROUGE_KEYS
SCREAMING_SNAKE_CASE__ :List[str] = "rouge2"
def __init__( self : Any , __a : Optional[int] , **__a : int ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
_UpperCamelCase : Tuple = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(__a , num_labels=__a , mode=self.mode , **__a )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
_UpperCamelCase : Optional[int] = Path(self.output_dir ) / "metrics.json"
_UpperCamelCase : Tuple = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
_UpperCamelCase : Any = 0
_UpperCamelCase : Optional[int] = defaultdict(__a )
_UpperCamelCase : Optional[Any] = self.config.model_type
_UpperCamelCase : int = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
_UpperCamelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_UpperCamelCase : Any = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
_UpperCamelCase : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_UpperCamelCase : Dict = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_UpperCamelCase : Any = get_git_info()["repo_sha"]
_UpperCamelCase : Dict = hparams.num_workers
_UpperCamelCase : Optional[Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __a ):
_UpperCamelCase : Optional[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_UpperCamelCase : Dict = self.decoder_start_token_id
_UpperCamelCase : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
_UpperCamelCase : Tuple = False
_UpperCamelCase : Union[str, Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_UpperCamelCase : List[Any] = self.hparams.eval_max_gen_length
else:
_UpperCamelCase : List[str] = self.model.config.max_length
_UpperCamelCase : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
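        # Dump one batch in decoded-text and token-id form for debugging; the flag set here
        # ensures this only happens once per run (see the guard in `_step`).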
_UpperCamelCase : Tuple = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(__a , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
_UpperCamelCase : Optional[Any] = True
return readable_batch
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Dict , **__a : List[Any] ) -> Any:
return self.model(__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[int] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = self.tokenizer.batch_decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
return lmap(str.strip , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : dict ) -> Tuple:
_UpperCamelCase : Optional[int] = self.tokenizer.pad_token_id
_UpperCamelCase, _UpperCamelCase : Dict = batch["input_ids"], batch["attention_mask"]
_UpperCamelCase : Any = batch["labels"]
if isinstance(self.model , __a ):
_UpperCamelCase : Tuple = self.model._shift_right(__a )
else:
_UpperCamelCase : Optional[int] = shift_tokens_right(__a , __a )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_UpperCamelCase : Dict = decoder_input_ids
self.save_readable_batch(__a )
_UpperCamelCase : str = self(__a , attention_mask=__a , decoder_input_ids=__a , use_cache=__a )
_UpperCamelCase : List[Any] = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_UpperCamelCase : List[Any] = nn.CrossEntropyLoss(ignore_index=__a )
assert lm_logits.shape[-1] == self.vocab_size
_UpperCamelCase : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_UpperCamelCase : Dict = nn.functional.log_softmax(__a , dim=-1 )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = label_smoothed_nll_loss(
__a , __a , self.hparams.label_smoothing , ignore_index=__a )
return (loss,)
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.tokenizer.pad_token_id
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[int] , __a : Optional[int] ) -> Dict:
_UpperCamelCase : str = self._step(__a )
_UpperCamelCase : Dict = dict(zip(self.loss_names , __a ) )
# tokens per batch
_UpperCamelCase : Tuple = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
_UpperCamelCase : List[str] = batch["input_ids"].shape[0]
_UpperCamelCase : str = batch["input_ids"].eq(self.pad ).sum()
_UpperCamelCase : int = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Any , __a : Optional[int] ) -> Dict:
return self._generative_step(__a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : Union[str, Any] , __a : int="val" ) -> Dict:
self.step_count += 1
_UpperCamelCase : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCamelCase : str = losses["loss"]
_UpperCamelCase : Dict = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
_UpperCamelCase : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCamelCase : torch.FloatTensor = torch.tensor(__a ).type_as(__a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__a )
_UpperCamelCase : List[str] = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_UpperCamelCase : List[str] = self.step_count
self.metrics[prefix].append(__a ) # callback writes this to self.metrics_save_path
_UpperCamelCase : Union[str, Any] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict , __a : Optional[int] ) -> Dict:
return calculate_rouge(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : dict ) -> dict:
_UpperCamelCase : Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_UpperCamelCase : Union[str, Any] = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=__a , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_UpperCamelCase : Optional[Any] = (time.time() - ta) / batch["input_ids"].shape[0]
_UpperCamelCase : List[str] = self.ids_to_clean_text(__a )
_UpperCamelCase : List[str] = self.ids_to_clean_text(batch["labels"] )
_UpperCamelCase : Union[str, Any] = self._step(__a )
_UpperCamelCase : Tuple = dict(zip(self.loss_names , __a ) )
_UpperCamelCase : Dict = self.calc_generative_metrics(__a , __a )
_UpperCamelCase : Union[str, Any] = np.mean(lmap(__a , __a ) )
base_metrics.update(gen_time=__a , gen_len=__a , preds=__a , target=__a , **__a )
return base_metrics
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Union[str, Any] , __a : int ) -> Dict:
return self._generative_step(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : int ) -> Tuple:
return self.validation_epoch_end(__a , prefix="test" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Union[str, Any] ) -> SeqaSeqDataset:
_UpperCamelCase : Any = self.n_obs[type_path]
_UpperCamelCase : int = self.target_lens[type_path]
_UpperCamelCase : int = self.dataset_class(
self.tokenizer , type_path=__a , n_obs=__a , max_target_length=__a , **self.dataset_kwargs , )
return dataset
def __SCREAMING_SNAKE_CASE ( self : str , __a : str , __a : int , __a : bool = False ) -> DataLoader:
_UpperCamelCase : Optional[int] = self.get_dataset(__a )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_UpperCamelCase : Optional[Any] = dataset.make_sortish_sampler(__a , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_UpperCamelCase : int = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_sampler=__a , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> DataLoader:
_UpperCamelCase : Tuple = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=__a )
return dataloader
def __SCREAMING_SNAKE_CASE ( self : Any ) -> DataLoader:
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> DataLoader:
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Optional[int] , __a : List[str] ) -> List[str]:
BaseTransformer.add_model_specific_args(__a , __a )
add_generic_args(__a , __a )
parser.add_argument(
"--max_source_length" , default=1024 , type=__a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=__a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=142 , type=__a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=142 , type=__a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=__a )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=__a )
parser.add_argument("--max_tokens_per_batch" , type=__a , default=__a )
parser.add_argument("--logger_name" , type=__a , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=__a , default=-1 , required=__a , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=__a , default=500 , required=__a , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=__a , default=-1 , required=__a , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=__a , default="summarization" , required=__a , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=__a , default=0.0 , required=__a )
parser.add_argument("--src_lang" , type=__a , default="" , required=__a )
parser.add_argument("--tgt_lang" , type=__a , default="" , required=__a )
parser.add_argument("--eval_beams" , type=__a , default=__a , required=__a )
parser.add_argument(
"--val_metric" , type=__a , default=__a , required=__a , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=__a , default=__a , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=__a , default=1 , required=__a , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=__a , default=-1 , required=__a , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "translation"
SCREAMING_SNAKE_CASE__ :Optional[Any] = ["loss"]
SCREAMING_SNAKE_CASE__ :List[str] = ["bleu"]
SCREAMING_SNAKE_CASE__ :List[Any] = "bleu"
def __init__( self : Union[str, Any] , __a : int , **__a : List[Any] ) -> Optional[Any]:
super().__init__(__a , **__a )
_UpperCamelCase : Optional[Any] = hparams.src_lang
_UpperCamelCase : Optional[int] = hparams.tgt_lang
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[int] , __a : Dict ) -> dict:
return calculate_bleu(__a , __a )
def lowercase__ ( lowercase_ ,lowercase_=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=lowercase_ )
check_output_dir(lowercase_ ,expected_items=3 )
if model is None:
if "summarization" in args.task:
_UpperCamelCase : SummarizationModule = SummarizationModule(lowercase_ )
else:
_UpperCamelCase : SummarizationModule = TranslationModule(lowercase_ )
_UpperCamelCase : int = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
_UpperCamelCase : Optional[int] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_UpperCamelCase : str = os.environ.get("WANDB_PROJECT" ,lowercase_ )
_UpperCamelCase : List[Any] = WandbLogger(name=model.output_dir.name ,project=lowercase_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_UpperCamelCase : Optional[Any] = WandbLogger(name=model.output_dir.name ,project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
_UpperCamelCase : Any = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
_UpperCamelCase : int = False
_UpperCamelCase : Dict = args.val_metric == "loss"
_UpperCamelCase : pl.Trainer = generic_train(
lowercase_ ,lowercase_ ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,lowercase_ ) ,early_stopping_callback=lowercase_ ,logger=lowercase_ ,)
pickle_save(model.hparams ,model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
_UpperCamelCase : Tuple = ""
_UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"*.ckpt" ) ,recursive=lowercase_ ) )
if checkpoints:
_UpperCamelCase : Dict = checkpoints[-1]
_UpperCamelCase : Optional[Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
lowerCamelCase__ = pl.Trainer.add_argparse_args(parser)
lowerCamelCase__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ = parser.parse_args()
main(args)
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
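# A hedged usage sketch (not part of this module): the pipeline exported above can be
# loaded from a pretrained checkpoint; the checkpoint id below is an assumption for
# illustration.
#
#   from diffusers import UnCLIPPipeline
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#   image = pipe("a corgi wearing sunglasses").images[0]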
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=lowerCamelCase__ ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
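# Google serves different markup per client: the "yuRUbf" container wraps desktop
# results with direct hrefs, while the legacy "kCrYT" layout wraps links in
# /url?q=... redirects, hence the parse_qs fallback below.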
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
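# Example invocation (assumed filename):
#
#   python google_search.py "hugging face transformers"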
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Dict = torch.nn.Linear(2 ,4 )
_UpperCamelCase : Tuple = torch.optim.AdamW(model.parameters() ,lr=1.0 )
_UpperCamelCase : Optional[int] = torch.optim.lr_scheduler.OneCycleLR(lowercase_ ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
_UpperCamelCase : Optional[int] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_UpperCamelCase : Tuple = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : int = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowercase_ )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@require_cuda
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Dict = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = Accelerator(cpu=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Optional[int] = Accelerator()
_UpperCamelCase : Any = GradientState()
assert state.num_steps == 1
_UpperCamelCase : List[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
_UpperCamelCase : List[str] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase : Optional[Any] = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = create_components()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = accelerator.prepare(__a , __a , __a , __a , __a )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = create_components()
accelerator.prepare(__a , __a , __a , __a , __a )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__a : Union[str, Any] , **__b : int ):
pass
with patch("torch.cuda.set_device" , __a ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
_UpperCamelCase : Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = create_components()
accelerator.prepare(__a , __a , __a , __a , __a )
_UpperCamelCase : str = get_signature(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__a )
# make sure random weights don't match
load_random_weights(__a )
self.assertTrue(abs(model_signature - get_signature(__a ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__a )
self.assertTrue(abs(model_signature - get_signature(__a ) ) < 1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : List[str] = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = create_components()
accelerator.prepare(__a , __a , __a , __a , __a )
_UpperCamelCase : str = get_signature(__a )
# saving hook
def save_config(__a : Optional[Any] , __a : str , __a : Optional[int] ):
_UpperCamelCase : Optional[int] = {"class_name": models[0].__class__.__name__}
with open(os.path.join(__a , "data.json" ) , "w" ) as f:
json.dump(__a , __a )
# loading hook
def load_config(__a : str , __a : Union[str, Any] ):
with open(os.path.join(__a , "data.json" ) , "r" ) as f:
_UpperCamelCase : int = json.load(__a )
_UpperCamelCase : Tuple = config["class_name"]
_UpperCamelCase : Tuple = accelerator.register_save_state_pre_hook(__a )
_UpperCamelCase : List[str] = accelerator.register_load_state_pre_hook(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__a )
# make sure random weights don't match with hooks
load_random_weights(__a )
self.assertTrue(abs(model_signature - get_signature(__a ) ) > 1e-3 )
# random class name to verify correct one is loaded
_UpperCamelCase : Dict = "random"
# make sure loaded weights match with hooks
accelerator.load_state(__a )
self.assertTrue(abs(model_signature - get_signature(__a ) ) < 1e-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__a )
# make sure random weights don't match with hooks removed
load_random_weights(__a )
self.assertTrue(abs(model_signature - get_signature(__a ) ) > 1e-3 )
# random class name to verify correct one is loaded
_UpperCamelCase : Any = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(__a )
self.assertTrue(abs(model_signature - get_signature(__a ) ) < 1e-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = create_components()
_UpperCamelCase : Optional[Any] = None
# This should work
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = accelerator.prepare(
__a , __a , __a , __a , __a , __a )
self.assertTrue(dummy_obj is None )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Optional[int] = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = create_components()
_UpperCamelCase : Union[str, Any] = [1, 2, 3]
# This should work
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = accelerator.prepare(
__a , __a , __a , __a , __a , __a )
self.assertEqual(
getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Model is missing `_is_accelerate_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Optimizer is missing `_is_accelerate_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Scheduler is missing `_is_accelerate_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`" , )
@slow
@require_bnb
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
from transformers import AutoModelForCausalLM
_UpperCamelCase : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__a , device_map={"": 0} , )
_UpperCamelCase : str = Accelerator()
# This should work
_UpperCamelCase : Optional[int] = accelerator.prepare(__a )
@slow
@require_bnb
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
_UpperCamelCase : Dict = Accelerator()
with init_empty_weights():
_UpperCamelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_UpperCamelCase : str = infer_auto_device_map(__a )
_UpperCamelCase : int = "cpu"
_UpperCamelCase : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=__a , load_in_abit=__a , llm_inta_enable_fpaa_cpu_offload=__a )
# This should not work and get value error
with self.assertRaises(__a ):
_UpperCamelCase : List[Any] = accelerator.prepare(__a )
@slow
@require_bnb
@require_multi_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
from transformers import AutoModelForCausalLM
_UpperCamelCase : int = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_UpperCamelCase : Union[str, Any] = infer_auto_device_map(__a )
_UpperCamelCase : List[str] = 1
_UpperCamelCase : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__a , device_map=__a , )
_UpperCamelCase : Tuple = Accelerator()
# This should not work and get value error
with self.assertRaises(__a ):
_UpperCamelCase : Dict = accelerator.prepare(__a )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
_UpperCamelCase : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
_UpperCamelCase : int = infer_auto_device_map(__a )
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__a , device_map=__a , )
_UpperCamelCase : Any = Accelerator()
# This should work
_UpperCamelCase : List[str] = accelerator.prepare(__a )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = torch.nn.Linear(10 , 10 )
_UpperCamelCase : Tuple = torch.optim.SGD(model.parameters() , lr=0.01 )
_UpperCamelCase : Any = Accelerator(cpu=__a )
_UpperCamelCase : str = accelerator.prepare(__a )
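# Hedged note: these tests are collected with pytest; the file path below is an
# assumption about where this module lives in the repository.
#
#   python -m pytest tests/test_accelerator.py -q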
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=250_880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=10_240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-05 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
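# A minimal usage sketch (assumed names; in the transformers library the config
# class above is published as XLMRobertaXLConfig):
#
#   from transformers import XLMRobertaXLConfig
#   config = XLMRobertaXLConfig()          # defaults mirror facebook/xlm-roberta-xl
#   assert config.num_hidden_layers == 36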