# Code-style dataset dump. Columns: code (string), code_codestyle (int, 0-371),
# style_context (string), style_context_codestyle (int, 0-349), label (int, 0-1).
# Each divider line below separates one code snippet recovered from the dump.
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    # Patch the config limit unless we are testing the library default.
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
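
# A minimal usage sketch (not part of the original test): `is_small_dataset`
# compares a byte size against `datasets.config.IN_MEMORY_MAX_SIZE`, which
# defaults to 0, so nothing counts as "small" until that limit is raised.
def _demo_is_small_dataset():
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # raise the limit to 500 MiB
    print(is_small_dataset(400 * 2**20))  # True: 400 MiB < 500 MiB
    print(is_small_dataset(600 * 2**20))  # False: 600 MiB >= 500 MiB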
# =============================================================================
"""simple docstring"""
from datetime import datetime
import requests
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes:
'''simple docstring'''
a : Dict = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
a : Dict = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(_lowercase ).content
if __name__ == "__main__":
a : str = input('''Enter Video/IGTV url: ''').strip()
a : Any = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
# =============================================================================
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
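
# A hedged usage sketch (not part of the test file above): running the same
# checkpoint the integration test exercises. Assumes Hub access and that
# "google/bit-50" is the first entry of BIT_PRETRAINED_MODEL_ARCHIVE_LIST.
def _demo_bit_feature_extraction():
    processor = BitImageProcessor.from_pretrained("google/bit-50")
    model = BitModel.from_pretrained("google/bit-50")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # pooled convolutional features; the exact shape depends on the checkpoint
    print(outputs.pooler_output.shape)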
# =============================================================================
"""Prim's algorithm for computing a minimum spanning tree, in two variants."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex holding its neighbors and the weights of incident edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None  # best known connection cost (Prim's key)
        self.pi = None  # predecessor in the spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex_id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum; runs in O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap; runs in O(E log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
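
# A small usage sketch (not part of the original module): connect four vertices
# and recover the same minimum spanning tree from both implementations.
def _demo_prim() -> None:
    graph = [Vertex(i) for i in range(4)]
    connect(graph, 1, 2, 1)  # edge 1-2 with weight 1
    connect(graph, 1, 3, 3)  # edge 1-3 with weight 3
    connect(graph, 2, 4, 6)  # edge 2-4 with weight 6
    connect(graph, 3, 4, 5)  # edge 3-4 with weight 5
    connect(graph, 1, 4, 2)  # edge 1-4 with weight 2
    print(prim(graph, graph[0]))  # [(2, 1), (3, 1), (4, 1)]
    print(list(prim_heap(graph, graph[0])))  # same tree from the heap variant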
if __name__ == "__main__":
import doctest
doctest.testmod()
# =============================================================================
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a per-pixel depth map for an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Resize the prediction back to the original (width, height) of the input.
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
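
# A hedged usage sketch (not part of the pipeline module): this class is what
# `pipeline("depth-estimation")` instantiates. Assumes Hub access and the
# public Intel/dpt-large checkpoint.
def _demo_depth_estimation():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image assembled in postprocess()
    print(result["predicted_depth"].shape)  # raw depth tensor from the model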
# =============================================================================
class MaxFenwickTree:
    """
    Maximum Fenwick tree: point updates and range-maximum queries, both in
    O(log^2 N). `query` works on the half-open interval [left, right).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # the node covers a single element
                self.tree[index] = value
            else:
                # recompute the maximum over the rest of the node's segment
                # (the original line garbled this call; a plain max of indices
                # would be a bug, so the segment query is used here)
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
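
# A small usage sketch (not part of the original module): point updates followed
# by range-maximum queries over the half-open interval [left, right).
def _demo_max_fenwick_tree() -> None:
    tree = MaxFenwickTree(10)
    tree.update(2, 20)
    tree.update(5, 10)
    print(tree.query(0, 10))  # 20: maximum over the whole array
    print(tree.query(3, 10))  # 10: index 2 is excluded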
if __name__ == "__main__":
import doctest
doctest.testmod()
# =============================================================================
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of the decoding method: the decoded sample from the last layer."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Discrete codebook lookup as used in VQ-VAEs; avoids costly matrix
    multiplications and allows for post-hoc remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
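
# A hedged shape-check sketch (not part of the module): round-trip a random
# image through the Encoder/Decoder pair above with their default single-block
# configuration; with one block there is no down/upsampling, so spatial size
# is preserved.
def _demo_encoder_decoder() -> None:
    enc = Encoder(in_channels=3, out_channels=4, double_z=False)
    dec = Decoder(in_channels=4, out_channels=3)
    x = torch.randn(1, 3, 32, 32)
    latents = enc(x)  # (1, 4, 32, 32)
    sample = dec(latents)  # (1, 3, 32, 32)
    print(latents.shape, sample.shape)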
# =============================================================================
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
_CITATION = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
# =============================================================================
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
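
# Example invocation (a sketch; the script filename and output path are
# illustrative): build a student checkpoint from the six teacher layers
# selected above.
#
#   python extract.py \
#       --model_type roberta \
#       --model_name roberta-large \
#       --dump_checkpoint serialization_dir/roberta_student.pth \
#       --vocab_transform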
# =============================================================================
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
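
# A small usage sketch (comment-only, not executed here): with _LazyModule in
# place, attribute access triggers the real submodule import only on first use:
#
#   from transformers import CpmAntConfig, CpmAntTokenizer
#   config = CpmAntConfig()  # imports .configuration_cpmant lazily
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")  # needs Hub access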
# =============================================================================
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder."""

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49_408,
        hidden_size=512,
        intermediate_size=2_048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49_406,
        eos_token_id=49_407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OWL-ViT vision encoder."""

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    """Composite configuration for an OWL-ViT text/vision model pair."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiate an OwlViTConfig from a text config dict and a vision config dict."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
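
# A minimal usage sketch (not part of the configuration module): composing the
# full config from its two sub-configs via `from_text_vision_configs` above.
def _demo_owlvit_config():
    text_config = OwlViTTextConfig(vocab_size=49_408)
    vision_config = OwlViTVisionConfig(image_size=768, patch_size=32)
    config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
    print(config.projection_dim)  # 512 by default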
# =============================================================================
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    """Configuration for the Table Transformer model (a DETR-style detector)."""

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
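
# A minimal usage sketch (not part of the configuration module): the default
# config, plus the two aliases declared in `attribute_map` above.
def _demo_table_transformer_config():
    config = TableTransformerConfig()
    print(config.d_model, config.hidden_size)  # 256 256 (hidden_size is an alias)
    print(config.num_attention_heads)  # 8 (alias for encoder_attention_heads)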
# =============================================================================
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 18
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = ShapEImgaImgPipeline
UpperCamelCase__ = ['''image''']
UpperCamelCase__ = ['''image''']
UpperCamelCase__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase__ = False
@property
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return 8
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
a = CLIPVisionModel(__magic_name__ )
return model
@property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__magic_name__ , do_normalize=__magic_name__ , do_resize=__magic_name__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
a = PriorTransformer(**__magic_name__ )
return model
@property
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
a = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
a = ShapERenderer(**__magic_name__ )
return model
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.dummy_prior
a = self.dummy_image_encoder
a = self.dummy_image_processor
a = self.dummy_renderer
a = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__magic_name__ , clip_sample=__magic_name__ , clip_sample_range=1.0 , )
a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :str , __magic_name__ :Tuple=0 ):
'''simple docstring'''
a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
if str(__magic_name__ ).startswith("""mps""" ):
a = torch.manual_seed(__magic_name__ )
else:
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = """cpu"""
a = self.get_dummy_components()
a = self.pipeline_class(**__magic_name__ )
a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = pipe(**self.get_dummy_inputs(__magic_name__ ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = torch_device == """cpu"""
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__magic_name__ , relax_max_difference=__magic_name__ , )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_dummy_components()
a = self.pipeline_class(**__magic_name__ )
a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = 1
a = 2
a = self.get_dummy_inputs(__magic_name__ )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__magic_name__ , num_images_per_prompt=__magic_name__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
a = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = torch.Generator(device=__magic_name__ ).manual_seed(0 )
a = pipe(
__magic_name__ , generator=__magic_name__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
| 228
| 0
|
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle outline, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
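# Usage sketch: `python fractals.py 4`. Depth 0 draws a single triangle, and each
# extra recursion level triples the count, so depth d draws 3**d smallest triangles.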
| 370
|
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
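# Classic worked example (key "LEMON" repeats over the letters of the message):
# translate_message("LEMON", "ATTACKATDAWN", "encrypt")  # -> "LXFOPVEFRNHR"
# translate_message("LEMON", "LXFOPVEFRNHR", "decrypt")  # -> "ATTACKATDAWN"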
| 347
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
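# Minimal usage sketch: the defaults above describe a small ASR architecture, and
# `attribute_map` lets generic code read `hidden_size` as an alias for `d_model`:
# config = Speech2TextConfig()
# assert config.hidden_size == config.d_model == 256
# assert len(config.conv_kernel_sizes) == config.num_conv_layers == 2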
| 74
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the top max_stories posts from Hacker News."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
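# Sample of the generated markdown (hypothetical titles; real output depends on
# the live Hacker News API at query time):
# * [Show HN: An example story](https://example.com/story)
# * [Another front-page story](https://example.com/other)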
| 48
| 0
|
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
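# Usage sketch (hypothetical paths; any seq2seq checkpoint works as tokenizer_name):
#   python save_len_file.py t5-small cnn_dm/
# This pickles per-example token lengths to each dataset's `len_file` path so that
# length-based batch samplers can use them without re-tokenizing the corpus.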
| 360
|
"""simple docstring"""
import random
def _partition(data: list, pivot) -> tuple[list, list, list]:
    """Three-way partition the data into smaller, equal and greater lists."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items (0-based) in O(n) expected time."""
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
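# Worked example: index is 0-based over the sorted order of the items, so for
# [2, 4, 5, 7, 899, 54, 32] (sorted: [2, 4, 5, 7, 32, 54, 899]):
# quick_select([2, 4, 5, 7, 899, 54, 32], 2)  # -> 5
# quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # -> 54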
| 77
| 0
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
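# Sanity check on the expected answer: a spanning tree of 9 nodes has exactly
# 8 edges, and the selected weights sum to 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.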
| 29
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 1
|
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that appends each vertex after all vertices it can reach."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every unvisited vertex reachable from vert in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
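# Worked example on test_graph_1: 0 -> 2 -> 1 -> 0 forms a cycle while 3 and 4
# only point forward, so:
# strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]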
| 368
|
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Store an optional default key, used whenever a method receives key = 0."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 187
| 0
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
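# Numeric sketch: with drop_prob = 0.5 in training mode, random_tensor is 0 or 1
# per sample, so roughly half of the batch has this branch zeroed while survivors
# are scaled by 1 / keep_prob = 2, keeping the expected output equal to the input.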
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A = None ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = drop_prob
def _A ( self , _A ):
'''simple docstring'''
return drop_path(_A , self.drop_prob , self.training )
def _A ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A , _A , _A=None ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = patch_size if isinstance(_A , collections.abc.Iterable ) else (patch_size, patch_size)
__SCREAMING_SNAKE_CASE = stride if isinstance(_A , collections.abc.Iterable ) else (stride, stride)
__SCREAMING_SNAKE_CASE = padding if isinstance(_A , collections.abc.Iterable ) else (padding, padding)
__SCREAMING_SNAKE_CASE = nn.Convad(_A , _A , kernel_size=_A , stride=_A , padding=_A )
__SCREAMING_SNAKE_CASE = norm_layer(_A ) if norm_layer else nn.Identity()
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.projection(_A )
__SCREAMING_SNAKE_CASE = self.norm(_A )
return embeddings
class UpperCAmelCase_ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , _A , **_A ):
'''simple docstring'''
super().__init__(1 , _A , **_A )
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.AvgPoolad(_A , stride=1 , padding=pool_size // 2 , count_include_pad=_A )
def _A ( self , _A ):
'''simple docstring'''
return self.pool(_A ) - hidden_states
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.Convad(_A , _A , 1 )
__SCREAMING_SNAKE_CASE = nn.Convad(_A , _A , 1 )
__SCREAMING_SNAKE_CASE = PoolFormerDropPath(_A )
if isinstance(config.hidden_act , _A ):
__SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
__SCREAMING_SNAKE_CASE = config.hidden_act
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conva(_A )
__SCREAMING_SNAKE_CASE = self.act_fn(_A )
__SCREAMING_SNAKE_CASE = self.drop(_A )
__SCREAMING_SNAKE_CASE = self.conva(_A )
__SCREAMING_SNAKE_CASE = self.drop(_A )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = PoolFormerPooling(_A )
__SCREAMING_SNAKE_CASE = PoolFormerOutput(_A , _A , _A , _A )
__SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(_A )
__SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(_A )
# Useful for training neural nets
__SCREAMING_SNAKE_CASE = PoolFormerDropPath(_A ) if drop_path > 0.0 else nn.Identity()
__SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
__SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
__SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
def _A ( self , _A ):
'''simple docstring'''
if self.use_layer_scale:
__SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(_A ) )
__SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
__SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(_A )
__SCREAMING_SNAKE_CASE = ()
__SCREAMING_SNAKE_CASE = self.output(self.after_norm(_A ) )
__SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
__SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(_A )
__SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
__SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(_A ) ) )
# First residual connection
__SCREAMING_SNAKE_CASE = pooling_output + hidden_states
__SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
__SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(_A ) ) )
__SCREAMING_SNAKE_CASE = hidden_states + layer_output
__SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
__SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__SCREAMING_SNAKE_CASE = nn.ModuleList(_A )
# Transformer blocks
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_A ) )
__SCREAMING_SNAKE_CASE = nn.ModuleList(_A )
def _A ( self , _A , _A=False , _A=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = () if output_hidden_states else None
__SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
__SCREAMING_SNAKE_CASE = embedding_layer(_A )
# Send the embeddings through the blocks
for _, blk in enumerate(_A ):
__SCREAMING_SNAKE_CASE = blk(_A )
__SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
__SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = PoolFormerConfig
UpperCamelCase__ : str = '''poolformer'''
UpperCamelCase__ : str = '''pixel_values'''
UpperCamelCase__ : Union[str, Any] = True
def _A ( self , _A ):
'''simple docstring'''
if isinstance(_A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _A ( self , _A , _A=False ):
'''simple docstring'''
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = value
lowerCAmelCase__ : str =r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCAmelCase__ : str =r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , UpperCamelCase_ , )
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
super().__init__(_A )
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = PoolFormerEncoder(_A )
# Initialize weights and apply final processing
self.post_init()
def _A ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _A ( self , _A = None , _A = None , _A = None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__SCREAMING_SNAKE_CASE = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , )
__SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_A , hidden_states=encoder_outputs.hidden_states , )
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.dense(_A )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , UpperCamelCase_ , )
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , _A ):
'''simple docstring'''
super().__init__(_A )
__SCREAMING_SNAKE_CASE = config.num_labels
__SCREAMING_SNAKE_CASE = PoolFormerModel(_A )
# Final norm
__SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _A ( self , _A = None , _A = None , _A = None , _A = None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
__SCREAMING_SNAKE_CASE = self.poolformer(
_A , output_hidden_states=_A , return_dict=_A , )
__SCREAMING_SNAKE_CASE = outputs[0]
__SCREAMING_SNAKE_CASE = self.classifier(self.norm(_A ).mean([-2, -1] ) )
__SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__SCREAMING_SNAKE_CASE = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__SCREAMING_SNAKE_CASE = 'single_label_classification'
else:
__SCREAMING_SNAKE_CASE = 'multi_label_classification'
if self.config.problem_type == "regression":
__SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
__SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__SCREAMING_SNAKE_CASE = loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
__SCREAMING_SNAKE_CASE = CrossEntropyLoss()
__SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
__SCREAMING_SNAKE_CASE = loss_fct(_A , _A )
if not return_dict:
__SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 257
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def __lowercase ( a__ , a__ , a__ , a__ = 8 , a__ = DEFAULT_DEVICE , a__=False , a__="summarization" , a__=None , **a__ , ) -> Dict:
__SCREAMING_SNAKE_CASE = Path(a__ ).open('w' , encoding='utf-8' )
__SCREAMING_SNAKE_CASE = str(a__ )
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(a__ ).to(a__ )
if fpaa:
__SCREAMING_SNAKE_CASE = model.half()
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a__ )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__SCREAMING_SNAKE_CASE = time.time()
# update config with task specific params
use_task_specific_params(a__ , a__ )
if prefix is None:
__SCREAMING_SNAKE_CASE = prefix or getattr(model.config , 'prefix' , '' ) or ''
for examples_chunk in tqdm(list(chunks(a__ , a__ ) ) ):
__SCREAMING_SNAKE_CASE = [prefix + text for text in examples_chunk]
__SCREAMING_SNAKE_CASE = tokenizer(a__ , return_tensors='pt' , truncation=a__ , padding='longest' ).to(a__ )
__SCREAMING_SNAKE_CASE = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **a__ , )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a__ , skip_special_tokens=a__ , clean_up_tokenization_spaces=a__ )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
__SCREAMING_SNAKE_CASE = int(time.time() - start_time ) # seconds
__SCREAMING_SNAKE_CASE = len(a__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def __lowercase ( ) -> Any:
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def __lowercase ( a__=True ) -> int:
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('model_name' , type=a__ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=a__ , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=a__ , help='where to save summaries' )
parser.add_argument('--reference_path' , type=a__ , required=a__ , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=a__ , required=a__ , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=a__ , required=a__ , default=a__ , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
'--prefix' , type=a__ , required=a__ , default=a__ , help='will be added to the begininng of src examples' )
parser.add_argument('--task' , type=a__ , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=a__ , default=8 , required=a__ , help='batch size' )
parser.add_argument(
'--n_obs' , type=a__ , default=-1 , required=a__ , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=a__ , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_known_args()
__SCREAMING_SNAKE_CASE = parse_numeric_n_bool_cl_kwargs(a__ )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__SCREAMING_SNAKE_CASE = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__SCREAMING_SNAKE_CASE = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=a__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
__SCREAMING_SNAKE_CASE = generate_summaries_or_translations(
a__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **a__ , )
if args.reference_path is None:
return {}
# Compute scores
__SCREAMING_SNAKE_CASE = calculate_bleu if 'translation' in args.task else calculate_rouge
__SCREAMING_SNAKE_CASE = [x.rstrip() for x in open(args.save_path ).readlines()]
__SCREAMING_SNAKE_CASE = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(a__ )]
__SCREAMING_SNAKE_CASE = score_fn(a__ , a__ )
scores.update(a__ )
if args.dump_args:
scores.update(a__ )
if args.info:
__SCREAMING_SNAKE_CASE = args.info
if verbose:
print(a__ )
if args.score_path is not None:
json.dump(a__ , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 257
| 1
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , ) -> Union[str, Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = 13
lowerCAmelCase__ = 7
lowerCAmelCase__ = 30
lowerCAmelCase__ = self.seq_length + self.mem_len
lowerCAmelCase__ = 15
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 99
lowerCAmelCase__ = [10, 50, 80]
lowerCAmelCase__ = 32
lowerCAmelCase__ = 32
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 128
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
lowerCAmelCase__ = None
lowerCAmelCase__ = 1
lowerCAmelCase__ = 0
lowerCAmelCase__ = 3
lowerCAmelCase__ = self.vocab_size - 1
lowerCAmelCase__ = 0.01
def a ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def a ( self : Tuple ) -> List[str]:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
lowerCAmelCase__ = TFTransfoXLModel(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ , lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCAmelCase__ = {"input_ids": input_ids_a, "mems": mems_a}
lowerCAmelCase__ , lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def a ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> str:
lowerCAmelCase__ = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ , lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCAmelCase__ = {"input_ids": input_ids_a, "labels": lm_labels}
lowerCAmelCase__ , lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCAmelCase__ , lowerCAmelCase__ = model([input_ids_a, mems_a] ).to_tuple()
lowerCAmelCase__ = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
lowerCAmelCase__ , lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
lowerCAmelCase__ = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Union[str, Any] ) -> List[Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
snake_case__ = () if is_tf_available() else ()
snake_case__ = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def a ( self : Dict ) -> List[str]:
lowerCAmelCase__ = TFTransfoXLModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , d_embed=37 )
def a ( self : Union[str, Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def a ( self : Any ) -> Dict:
self.model_tester.set_seed()
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> int:
self.model_tester.set_seed()
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def a ( self : Any ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCAmelCase__ = model.get_output_embeddings()
assert isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Layer )
lowerCAmelCase__ = model.get_bias()
assert name is None
else:
lowerCAmelCase__ = model.get_output_embeddings()
assert x is None
lowerCAmelCase__ = model.get_bias()
assert name is None
def a ( self : Any ) -> Tuple:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def a ( self : Optional[Any] ) -> Optional[int]:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def a ( self : Union[str, Any] ) -> int:
pass
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def a ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase__ = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
lowerCAmelCase__ = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCAmelCase__ = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCAmelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , max_length=200 , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE__ )
| 367
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
UpperCamelCase = parser.parse_args()
main(args.correct_filename, args.fail_filename)
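# Hedged usage sketch (file names are hypothetical): each line of the "correct"
# file holds four ";"-separated fields: file path, test class, test name, and
# the corrected assertion line to splice in.
sample = "tests/models/bert/test_modeling_bert.py;BertIntegrationTest;test_inference;expected_slice = [0.1, 0.2]\n"
with open("corrected_lines.txt", "w") as f:
    f.write(sample)
main("corrected_lines.txt", None)  # rewrites the matching line in place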
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients `poly` (lowest degree first) at `x`.

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule: n multiplies and n adds.

    >>> horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5x^2 + 9.3x^3 + 7x^4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
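# Hedged sketch: Horner's rule computes the same value as the naive power sum,
# so the two evaluators should agree up to floating-point rounding. The helper
# name below is illustrative, not part of the original module.
import math
import random


def check_agreement(trials: int = 100) -> None:
    for _ in range(trials):
        poly = [random.uniform(-10, 10) for _ in range(random.randint(1, 8))]
        x = random.uniform(-5, 5)
        assert math.isclose(evaluate_poly(poly, x), horner(poly, x), rel_tol=1e-9, abs_tol=1e-9)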
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Never mask the first decoder token; mask padding in the rest.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        return TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
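# Hedged sketch of the same translation flow outside the test harness (the
# checkpoint name comes from the test above; the variable names are illustrative):
tok = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
mdl = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
batch = tok([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
ids = mdl.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tok.batch_decode(ids, skip_special_tokens=True))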
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None) -> None:
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references this node carries."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16) -> None:
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level: keep flipping a p-biased coin, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
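# Hedged usage sketch: with p = 0.5 each node's height is geometrically
# distributed, so a key carries 1 / (1 - p) = 2 pointers on average and search
# stays O(log n) with high probability.
demo = SkipList()
for k, v in [("a", 1), ("b", 2), ("c", 3)]:
    demo.insert(k, v)
assert demo.find("b") == 2
demo.delete("b")
assert demo.find("b") is None
print(demo)  # renders one column per level of the list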
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )
        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )
        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
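# Hedged usage sketch mirroring what these tests exercise (the tiny checkpoint
# name comes from the tests above; variable names are illustrative):
clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(clf("This is great !"))              # best label only
print(clf("This is great !", top_k=None))  # every label with its score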
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
            AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
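# Hedged sketch of the launcher invocation the tests above assemble; the
# training-script path is hypothetical. The flags themselves are the same ones
# built into cmd_config above.
example_cmd = [
    "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0",
    "--use_fsdp", "--mixed_precision=fp16", "--fsdp_sharding_strategy=1",
    "--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP",
    "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
    "path/to/train_script.py",
]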
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
__snake_case : Optional[Any] = [8, 5, 9, 7]
__snake_case : List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__snake_case : Optional[int] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated amount of each resource across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: total claim minus what is already allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Remaining need per process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's need vector to its original index for later lookup."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm: repeatedly run any process whose
        remaining need fits in the available resources, freeing its allocation."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
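# Hedged usage sketch with the module-level tables defined above; any truthy
# keyword (here `describe=True`) triggers the pretty-printer inside `main`.
BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)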
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
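# Hedged sketch of the same classification flow outside the test harness (the
# checkpoint name comes from the tests above; variable names are illustrative):
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
classifier = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
batch = processor(images=prepare_img(), return_tensors="pt")
with torch.no_grad():
    class_logits = classifier(**batch).logits
print(classifier.config.id2label[int(class_logits.argmax(-1))])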
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
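

# The @slow tests above are skipped by default; the transformers test suite enables
# them with the RUN_SLOW environment flag. The test path below is a hedged guess at
# this file's location, not taken from the source:
#   RUN_SLOW=1 pytest tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py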
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (old, new) parameter-name pairs for the conversion."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the original classification head weights from the state dict."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move a value from key `old` to key `new` in the dict."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download a COCO sample image on which we verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak an original DINO checkpoint into the HuggingFace ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
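
# Example invocation (hedged: the script filename and output directory below are
# illustrative placeholders, not taken from the original source):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16_hf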
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
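

# Hedged usage sketch of the processor under test (the 32x32 input size is an
# arbitrary illustrative choice, not taken from the source):
#   tester = MobileViTImageProcessingTester(None)
#   processor = MobileViTImageProcessor(**tester.prepare_image_processor_dict())
#   processor(images=Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values.shape
#   -> torch.Size([1, 3, 18, 18])  (resized to shortest_edge=20, center-cropped to 18x18)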
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
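

# Hedged usage sketch of the config defined above:
#   config = Swin2SRConfig(upscale=4)               # 4x super-resolution variant
#   assert config.num_layers == len(config.depths)  # derived field set in __init__
#   assert config.hidden_size == config.embed_dim   # via attribute_map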
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
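

# Hedged usage sketch: constructing a folding-model config exercises the
# EsmFoldConfig/TrunkConfig defaults and the ESM-2 vocabulary fallback above.
#   config = EsmConfig(vocab_size=33, is_folding_model=True)
#   assert config.vocab_list == get_default_vocab_list()
#   assert config.esmfold_config.trunk.structure_module.sequence_dim == 384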
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
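
    # Quick demonstration added for illustration (hedged; not part of the
    # original module):
    matrix = Matrix([[1, 2], [3, 4]])
    print(matrix.determinant())  # -2
    print(matrix * matrix.identity() == matrix)  # True: multiplying by I is a no-op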
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models.

    Files are not read in memory because of the size of some datasets
    (e.g. CNN/DailyMail); we only list the documents to summarize.
    """

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary lines from a raw CNN/DailyMail story file."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if too long, pad if too short."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Build an attention mask that is 1 everywhere except at padding positions."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single token sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate segment embeddings (0/1) at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
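

if __name__ == "__main__":
    # Hedged sanity check of the story-processing helpers, added for
    # illustration; not part of the original module.
    raw = "First sentence.\nSecond sentence\n@highlight\nA summary line"
    story, summary = process_story(raw)
    print(story)    # ['First sentence.', 'Second sentence.']
    print(summary)  # ['A summary line.']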
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
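

# Hedged usage sketch (downloads the gpt2 vocabulary from the Hub on first use):
#   tokenizer = GPTaTokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]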
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
def _UpperCAmelCase ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
lowercase__ : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
lowercase__ : Optional[int] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
lowercase__ : Union[str, Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a )
lowercase__ : str = AutoImageProcessor.from_pretrained(a , trust_remote_code=a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def _UpperCAmelCase ( self ) -> int:
try:
AutoConfig.register('custom' , a )
AutoImageProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoImageProcessor.register(a , a )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Optional[Any] = Path(a ) / 'preprocessor_config.json'
lowercase__ : List[Any] = Path(a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(a , 'w' ) )
lowercase__ : Union[str, Any] = CustomImageProcessor.from_pretrained(a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a )
lowercase__ : Optional[int] = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Dict:
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Union[str, Any] = True
try:
AutoConfig.register('custom' , a )
AutoImageProcessor.register(a , a )
# If remote code is not set, the default is to use local
lowercase__ : int = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase__ : Optional[int] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase__ : int = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(a , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
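# A condensed sketch of the registration flow the tests above exercise.
# The helper name is ours and `save_dir` is assumed to already contain a
# config written by CustomImageProcessor.save_pretrained; it is not part
# of the test suite.
def _load_registered_custom_processor(save_dir):
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    return AutoImageProcessor.from_pretrained(save_dir)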
| 77
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
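# A small illustrative sketch (the helper name and values are ours, not
# tied to any checkpoint) of the past_key_values shape that
# generate_dummy_inputs builds above: one (key, value) pair per layer,
# each of shape (batch, n_head, past_seq_len, head_dim).
def _past_shape_demo(batch=2, n_head=16, n_embd=4096, seqlen=8):
    past_key_values_length = seqlen + 2  # mirrors the "+ 2" used above
    return (batch, n_head, past_key_values_length, n_embd // n_head)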
| 369
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
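# A small arithmetic sketch of the spatial sizes the shape assertions
# above rely on (stride-4 stem, then three stride-2 stages); the helper
# name is ours, not part of the test suite.
def _resnet_spatial_sizes(image_size: int = 224) -> list:
    sizes = [image_size // 4]  # resolution after the embedder/stem
    for _ in range(3):  # each later stage halves height and width
        sizes.append(sizes[-1] // 2)
    return sizes  # e.g. [56, 28, 14, 7]; 224 // 32 == 7 at the last stage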
| 327
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
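# A quick sanity sketch of how the table above is used: ONNX element-type
# strings (as reported by onnxruntime tensor metadata) map onto NumPy
# dtypes. The helper is ours, added for illustration.
def _ort_dtype_demo() -> None:
    assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32
    assert ORT_TO_NP_TYPE["tensor(int64)"] is np.int64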
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[str] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
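# A hypothetical usage sketch of the wrapper above; the repo id is
# illustrative, and calling this would download `model.onnx` from the Hub
# into the local cache before creating an onnxruntime session.
def _load_onnx_example(repo_id: str = "some-org/some-onnx-model"):
    return OnnxRuntimeModel.from_pretrained(repo_id, provider="CPUExecutionProvider")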
| 287
|
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
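# A short usage sketch (the helper below is ours, added for illustration):
# 25 is 0b11001 and 32 is 0b100000, so both are padded to six bits and
# combined position by position.
def _binary_xor_demo() -> None:
    assert binary_xor(25, 32) == "0b111001"
    assert binary_xor(37, 50) == "0b010111"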
| 121
| 0
|
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
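# A small worked check of the rolling-hash update used above (the helper
# is ours, added for illustration): for a window of length 2, dropping the
# first character and appending the next one must reproduce the hash
# computed from scratch. Here modulus_power equals alphabet_size because
# the window length is 2.
def _rolling_hash_demo() -> None:
    h_ab = (ord("b") + ord("a") * alphabet_size) % modulus
    h_bc = (ord("c") + ord("b") * alphabet_size) % modulus
    rolled = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert rolled == h_bc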
| 354
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 142
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 109
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
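# A brief usage sketch (standalone helper, not part of the module above):
# integrating y' = y from x = 0 to 1 with y(0) = 1 should land close to
# e ≈ 2.71828 for a small enough step size.
def _euler_modified_demo() -> float:
    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    return float(ys[-1])  # ~2.7183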
| 109
| 1
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 361
|
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
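# A quick worked check (helper added for illustration): the first five
# Catalan numbers under this 1-indexed convention are 1, 1, 2, 5, 14.
def _catalan_demo() -> None:
    assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]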
| 267
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
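# A condensed, hypothetical sketch of the cached decoding pattern the
# past-key-values test above validates: feeding only the newest token
# together with the cache must match recomputing over the full sequence.
def _cached_step(model, input_ids):
    out = model(input_ids, use_cache=True)
    next_token = out.logits[:, -1:].argmax(dim=-1)
    step = model(next_token, past_key_values=out.past_key_values)
    return step.logits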
| 20
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
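# A toy, self-contained sketch (ours, not transformers code) of the
# lazy-module pattern applied above: attribute access resolves to a real
# import on first use instead of importing every backend eagerly, e.g.
# _ToyLazyModule("m", {"json": ["dumps"]}).dumps is json.dumps.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(attr)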
| 301
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
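# A tiny sketch (helper added for illustration) of the backwards
# compatibility parsing in DebertaV2Config above: a pipe-separated string
# such as "c2p|p2c" becomes the list ["c2p", "p2c"].
def _parse_pos_att_type(value):
    if isinstance(value, str):
        return [x.strip() for x in value.lower().split("|")]
    return value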
| 362
|
def solution() -> str:
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
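# An equivalent sketch using modular exponentiation (an alternative, not
# the author's method): keeping only the last ten digits at every step
# bounds the size of the intermediate numbers.
def _solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10)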
| 284
| 0
|
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list) -> Node:
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
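

# An iterative variant (editor-added sketch, not part of the original module):
# avoids Python's recursion limit for very long lists by collecting values first.
def print_reverse_iterative(head_node: Node) -> None:
    values = []
    while head_node:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)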
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
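
# Editor-added usage sketch (not part of this module): everything above is
# re-exported through `accelerate.utils`, e.g. recursively moving a nested
# batch of tensors to a device:
#
#     from accelerate.utils import send_to_device
#     batch = {"input_ids": input_ids, "labels": [labels]}
#     batch = send_to_device(batch, "cuda:0")  # recurses through dicts/lists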
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniformly from the square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo integration: average the function over uniform samples and scale by the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
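
# Usage sketch (editor-added; iteration counts are arbitrary): more samples
# shrink the expected error roughly like 1/sqrt(iterations).
#
#     pi_estimator(100_000)
#     area_under_line_estimator_check(100_000, 0.0, 1.0)
#     pi_estimator_using_area_under_curve(100_000)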
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Find the maximum sum over subsets of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # either extend a sum that excluded the previous element, or skip num
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
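
    # Illustrative checks (editor-added; values chosen for illustration):
    # for [1, 5, 3, 7, 2, 2, 6], picking 5 + 7 + 6 = 18 is optimal.
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4
    assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18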
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
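
# Editor-added usage sketch (hedged; the task name is arbitrary): once running via
# `transformers-cli serve --task sentiment-analysis`, the tokenize endpoint can be
# exercised with an embedded-body POST:
#
#     curl -X POST http://localhost:8888/tokenize \
#          -H "Content-Type: application/json" \
#          -d '{"text_input": "Hello world", "return_ids": true}'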
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(RuntimeError):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
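

# A minimal reference sketch consistent with the expectations above (editor-added;
# not the library implementation): contiguous ranges, with the remainder spread
# over the first jobs.
def distribute_shards_sketch(num_shards, max_num_jobs):
    groups = []
    start = 0
    for job_idx in range(max_num_jobs):
        size = num_shards // max_num_jobs + (job_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        groups.append(range(start, start + size))
        start += size
    return groups


assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]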
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts (the original set holds
        # assorted Unicode space characters that may render identically here)
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Returns the preprocessed text: non-printing characters removed, whitespaces normalized, NFC applied."""
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row NumPy array into a column NumPy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # The number of requested dimensions must be strictly less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
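
# Usage sketch (editor-added; the data is illustrative): project a 3-feature
# dataset onto its two principal directions. Rows are features and columns are
# samples, matching the conventions of the functions above.
#
#     features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
#     projected = principal_component_analysis(features, dimensions=2)  # shape (2, 4)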
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
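# How the lazy pattern behaves in practice (editor-added sketch): importing the
# package is cheap, and the heavy torch-backed module only loads on first access.
#
#     from transformers.models.nezha import NezhaConfig   # no torch-side import yet
#     from transformers.models.nezha import NezhaModel    # triggers loading modeling_nezha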
"""Tokenization classes for ALBERT model."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # split pieces like "9," so the digit and the comma are tokenized separately
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
def text_justification(word: str, max_width: int) -> list:
    """
    Format the given string so that each line has exactly ``max_width``
    characters and is fully (left and right) justified.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    >>> text_justification("Two roads diverged in a yellow wood", 16)
    ['Two        roads', 'diverged   in  a', 'yellow wood     ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
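
    # Illustrative run (editor-added sketch; the sample sentence is arbitrary):
    # each returned line is exactly max_width characters wide.
    for justified_line in text_justification("The quick brown fox jumps over the lazy dog", 20):
        print(repr(justified_line))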
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : str = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCamelCase : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = FlaxGPTJModelTester(self )
def _snake_case ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> int:
for model_class_name in self.all_model_classes:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@tooslow
def _snake_case ( self ) -> Any:
_lowerCAmelCase = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
_lowerCAmelCase = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )
_lowerCAmelCase = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
_lowerCAmelCase = False
_lowerCAmelCase = model.config.eos_token_id
_lowerCAmelCase = jax.jit(model.generate )
_lowerCAmelCase = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
_lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
_lowerCAmelCase = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@is_pt_flax_cross_test
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = pt_inputs["input_ids"].shape
_lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowerCAmelCase ):
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = pt_model_class(_lowerCAmelCase ).eval()
_lowerCAmelCase = model_class(_lowerCAmelCase , dtype=jnp.floataa )
_lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowerCAmelCase )
_lowerCAmelCase = fx_state
with torch.no_grad():
_lowerCAmelCase = pt_model(**_lowerCAmelCase ).to_tuple()
_lowerCAmelCase = fx_model(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = model_class.from_pretrained(_lowerCAmelCase , from_pt=_lowerCAmelCase )
_lowerCAmelCase = fx_model_loaded(**_lowerCAmelCase ).to_tuple()
self.assertEqual(
len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def _snake_case ( self ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = pt_model_class(_lowerCAmelCase ).eval()
_lowerCAmelCase = model_class(_lowerCAmelCase , dtype=jnp.floataa )
_lowerCAmelCase = load_flax_weights_in_pytorch_model(_lowerCAmelCase , fx_model.params )
_lowerCAmelCase , _lowerCAmelCase = pt_inputs["input_ids"].shape
_lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowerCAmelCase ):
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
_lowerCAmelCase = pt_model(**_lowerCAmelCase ).to_tuple()
_lowerCAmelCase = fx_model(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = pt_model_class.from_pretrained(_lowerCAmelCase , from_flax=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = pt_model_loaded(**_lowerCAmelCase ).to_tuple()
self.assertEqual(
len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def _snake_case ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_lowerCAmelCase = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
_lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
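# A minimal sketch of the incremental-decoding pattern the cache tests above
# exercise (checkpoint name as in the slow test; the other names are assumptions):
#
#   model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#   past_key_values = model.init_cache(batch_size=1, max_length=20)
#   attention_mask = jnp.ones((1, 20), dtype="i4")
#   position_ids = jnp.arange(prompt_ids.shape[-1])[None, :]
#   outputs = model(prompt_ids, attention_mask=attention_mask,
#                   past_key_values=past_key_values, position_ids=position_ids)
#   # every later step feeds a single token plus the updated cache:
#   next_position = jnp.array([[prompt_ids.shape[-1]]], dtype="i4")
#   outputs = model(next_token, attention_mask=attention_mask,
#                   past_key_values=outputs.past_key_values, position_ids=next_position)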
| 158
| 0
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = 5
# Realm tok
UpperCamelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCamelCase = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(A_ , exist_ok=A_ )
UpperCamelCase = os.path.join(A_ , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
UpperCamelCase = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(A_ , exist_ok=A_ )
def __UpperCamelCase ( self ) -> RealmTokenizer:
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = RealmConfig(num_block_records=self.num_block_records )
return config
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=A_ , )
return block_records
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.get_config()
UpperCamelCase = self.get_dummy_retriever()
UpperCamelCase = retriever.tokenizer
UpperCamelCase = np.array([0, 3] , dtype='long' )
UpperCamelCase = tokenizer(['Test question'] ).input_ids
UpperCamelCase = tokenizer(
['the fourth'] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
UpperCamelCase = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors='np' )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.get_config()
UpperCamelCase = self.get_dummy_retriever()
UpperCamelCase = retriever.tokenizer
UpperCamelCase = np.array([0, 3, 5] , dtype='long' )
UpperCamelCase = tokenizer(['Test question'] ).input_ids
UpperCamelCase = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
UpperCamelCase = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors='np' )
self.assertEqual([False, True, True] , A_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
UpperCamelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
UpperCamelCase = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 110
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : torch.FloatTensor
class lowercase ( nn.Module ):
def __init__( self , A_=3 , A_=3 , A_=("DownEncoderBlock2D",) , A_=(64,) , A_=2 , A_=32 , A_="silu" , A_=True , ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = torch.nn.Convad(
A_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([] )
# down
UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
UpperCamelCase = output_channel
UpperCamelCase = block_out_channels[i]
UpperCamelCase = i == len(A_ ) - 1
UpperCamelCase = get_down_block(
A_ , num_layers=self.layers_per_block , in_channels=A_ , out_channels=A_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , )
self.down_blocks.append(A_ )
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , )
# out
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=A_ , eps=1e-6 )
UpperCamelCase = nn.SiLU()
UpperCamelCase = 2 * out_channels if double_z else out_channels
UpperCamelCase = nn.Convad(block_out_channels[-1] , A_ , 3 , padding=1 )
UpperCamelCase = False
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = x
UpperCamelCase = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ ):
def custom_forward(*A_ ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) , A_ , use_reentrant=A_ )
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A_ , use_reentrant=A_ )
else:
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) , A_ )
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , A_ )
else:
# down
for down_block in self.down_blocks:
UpperCamelCase = down_block(A_ )
# middle
UpperCamelCase = self.mid_block(A_ )
# post-process
UpperCamelCase = self.conv_norm_out(A_ )
UpperCamelCase = self.conv_act(A_ )
UpperCamelCase = self.conv_out(A_ )
return sample
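# Note: the boolean set to False at the end of __init__ above is the encoder's
# `gradient_checkpointing` flag checked in forward(). When it is enabled during
# training, each down block and the mid block are re-executed in the backward
# pass via torch.utils.checkpoint instead of caching activations, trading extra
# compute for a smaller memory footprint.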
class lowercase ( nn.Module ):
def __init__( self , A_=3 , A_=3 , A_=("UpDecoderBlock2D",) , A_=(64,) , A_=2 , A_=32 , A_="silu" , A_="group" , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = nn.Convad(
A_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([] )
UpperCamelCase = in_channels if norm_type == 'spatial' else None
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , )
# up
UpperCamelCase = list(reversed(A_ ) )
UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
UpperCamelCase = output_channel
UpperCamelCase = reversed_block_out_channels[i]
UpperCamelCase = i == len(A_ ) - 1
UpperCamelCase = get_up_block(
A_ , num_layers=self.layers_per_block + 1 , in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , resnet_time_scale_shift=A_ , )
self.up_blocks.append(A_ )
UpperCamelCase = output_channel
# out
if norm_type == "spatial":
UpperCamelCase = SpatialNorm(block_out_channels[0] , A_ )
else:
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=A_ , eps=1e-6 )
UpperCamelCase = nn.SiLU()
UpperCamelCase = nn.Convad(block_out_channels[0] , A_ , 3 , padding=1 )
UpperCamelCase = False
def __UpperCamelCase ( self , A_ , A_=None ) -> Dict:
"""simple docstring"""
UpperCamelCase = z
UpperCamelCase = self.conv_in(A_ )
UpperCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ ):
def custom_forward(*A_ ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A_ , A_ , use_reentrant=A_ )
UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) , A_ , A_ , use_reentrant=A_ )
else:
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A_ , A_ )
UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) , A_ , A_ )
else:
# middle
UpperCamelCase = self.mid_block(A_ , A_ )
UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
UpperCamelCase = up_block(A_ , A_ )
# post-process
if latent_embeds is None:
UpperCamelCase = self.conv_norm_out(A_ )
else:
UpperCamelCase = self.conv_norm_out(A_ , A_ )
UpperCamelCase = self.conv_act(A_ )
UpperCamelCase = self.conv_out(A_ )
return sample
class lowercase ( nn.Module ):
def __init__( self , A_ , A_ , A_ , A_=None , A_="random" , A_=False , A_=True ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = n_e
UpperCamelCase = vq_embed_dim
UpperCamelCase = beta
UpperCamelCase = legacy
UpperCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCamelCase = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
UpperCamelCase = self.used.shape[0]
UpperCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase = self.re_embed
UpperCamelCase = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
UpperCamelCase = n_e
UpperCamelCase = sane_index_shape
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = inds.shape
assert len(A_ ) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1 )
UpperCamelCase = self.used.to(A_ )
UpperCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase = match.argmax(-1 )
UpperCamelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCamelCase = self.unknown_index
return new.reshape(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = inds.shape
assert len(A_ ) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1 )
UpperCamelCase = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase = 0 # simply set to zero
UpperCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , A_ )
return back.reshape(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
# reshape z -> (batch, height, width, channel) and flatten
UpperCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCamelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase = torch.argmin(torch.cdist(A_ , self.embedding.weight ) , dim=1 )
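        # torch.cdist evaluates the pairwise Euclidean distances directly, so the
        # expansion sketched in the comment above is never materialized explicitly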
UpperCamelCase = self.embedding(A_ ).view(z.shape )
UpperCamelCase = None
UpperCamelCase = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCamelCase = z + (z_q - z).detach()
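        # straight-through estimator: the forward value is exactly z_q, but since
        # (z_q - z) is detached, gradients flow back to the encoder output z as if
        # the quantization step were the identity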
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCamelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCamelCase = self.remap_to_used(A_ )
UpperCamelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCamelCase = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCamelCase = self.unmap_to_all(A_ )
UpperCamelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCamelCase = self.embedding(A_ )
if shape is not None:
UpperCamelCase = z_q.view(A_ )
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_=False ) -> Any:
"""simple docstring"""
UpperCamelCase = parameters
UpperCamelCase , UpperCamelCase = torch.chunk(A_ , 2 , dim=1 )
UpperCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCamelCase = deterministic
UpperCamelCase = torch.exp(0.5 * self.logvar )
UpperCamelCase = torch.exp(self.logvar )
if self.deterministic:
UpperCamelCase = UpperCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , A_ = None ) -> torch.FloatTensor:
"""simple docstring"""
# make sure sample is on the same device as the parameters and has same dtype
UpperCamelCase = randn_tensor(
self.mean.shape , generator=A_ , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCamelCase = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , A_=None ) -> Tuple:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , A_ , A_=[1, 2, 3] ) -> Optional[Any]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
UpperCamelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.mean
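# A minimal sketch (class name per the upstream diffusers API; it is masked
# above) of how this distribution is used in a VAE-style encode step: the
# encoder emits 2*C channels of "moments" that __init__ chunks into mean and
# logvar, and sample() applies the reparameterization trick x = mean + std * eps.
#
#   moments = encoder(pixel_values)                    # shape (B, 2*C, H, W)
#   posterior = DiagonalGaussianDistribution(moments)
#   z = posterior.sample()
#   kl_loss = posterior.kl().mean()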
| 110
| 1
|
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune branches that already overshoot the target or can no longer reach it
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
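# With the driver values above this prints the two subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9: [3, 4, 2] [4, 5]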
| 42
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowerCAmelCase = logging.get_logger(__name__)
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase=None ) -> int:
if not conversation_id:
_snake_case = uuid.uuida()
if past_user_inputs is None:
_snake_case = []
if generated_responses is None:
_snake_case = []
_snake_case = conversation_id
_snake_case = past_user_inputs
_snake_case = generated_responses
_snake_case = text
def __eq__(self , UpperCAmelCase ) -> Dict:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowercase (self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
_snake_case = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
_snake_case = text
def lowercase (self ) -> int:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_snake_case = None
def lowercase (self , UpperCAmelCase ) -> Any:
self.generated_responses.append(UpperCAmelCase )
def lowercase (self ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self ) -> Optional[int]:
_snake_case = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
_snake_case = """user""" if is_user else """bot"""
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
__snake_case , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
_snake_case = self.tokenizer.eos_token
def lowercase (self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Dict:
_snake_case = {}
_snake_case = {}
_snake_case = {}
if min_length_for_response is not None:
_snake_case = min_length_for_response
if minimum_tokens is not None:
_snake_case = minimum_tokens
if "max_length" in generate_kwargs:
_snake_case = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__(self , UpperCAmelCase , UpperCAmelCase=0 , **UpperCAmelCase ) -> Union[str, Any]:
_snake_case = super().__call__(UpperCAmelCase , num_workers=UpperCAmelCase , **UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def lowercase (self , UpperCAmelCase , UpperCAmelCase=32 ) -> Dict[str, Any]:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
_snake_case = self.tokenizer._build_conversation_input_ids(UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_snake_case = self._legacy_parse_and_tokenize(UpperCAmelCase )
if self.framework == "pt":
_snake_case = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_snake_case = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowercase (self , UpperCAmelCase , UpperCAmelCase=10 , **UpperCAmelCase ) -> Optional[int]:
_snake_case = generate_kwargs.get("""max_length""" , self.model.config.max_length )
_snake_case = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
_snake_case = max_length - minimum_tokens
_snake_case = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
_snake_case = model_inputs["""attention_mask"""][:, -trim:]
_snake_case = model_inputs.pop("""conversation""" )
_snake_case = max_length
_snake_case = self.model.generate(**UpperCAmelCase , **UpperCAmelCase )
if self.model.config.is_encoder_decoder:
_snake_case = 1
else:
_snake_case = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowercase (self , UpperCAmelCase , UpperCAmelCase=True ) -> List[str]:
_snake_case = model_outputs["""output_ids"""]
_snake_case = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
_snake_case = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCAmelCase )
return conversation
def lowercase (self , UpperCAmelCase ) -> Dict:
_snake_case = self.tokenizer.eos_token_id
_snake_case = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
if len(UpperCAmelCase ) > self.tokenizer.model_max_length:
_snake_case = input_ids[-self.tokenizer.model_max_length :]
return input_ids
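# A minimal usage sketch (the class names match the upstream transformers API
# but are masked above; the checkpoint is only an example):
#
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])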
| 341
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Optional[int] = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 311
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factorisation of a positive integer, smallest factor first.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ = XGLMTokenizer
UpperCAmelCase_ = XGLMTokenizerFast
UpperCAmelCase_ = True
UpperCAmelCase_ = True
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ = XGLMTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self :str ) -> List[Any]:
UpperCAmelCase__ = "<pad>"
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def UpperCAmelCase_ ( self :List[str] ) -> Union[str, Any]:
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCamelCase ) , 1008 )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def UpperCAmelCase_ ( self :Optional[Any] ) -> List[Any]:
UpperCAmelCase__ = XGLMTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
UpperCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCAmelCase_ ( self :Tuple ) -> Optional[int]:
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def UpperCAmelCase_ ( self :str ) -> int:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase , f.name )
UpperCAmelCase__ = XGLMTokenizer(f.name , keep_accents=lowerCamelCase )
UpperCAmelCase__ = pickle.dumps(lowerCamelCase )
pickle.loads(lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = "I was born in 92000, and this is falsé."
UpperCAmelCase__ = tokenizer.tokenize(lowerCamelCase )
UpperCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
UpperCAmelCase__ = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = tokenizer.encode(lowerCamelCase )
UpperCAmelCase__ = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :Optional[int] ) -> int:
UpperCAmelCase__ = "Hello World!"
UpperCAmelCase__ = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def UpperCAmelCase_ ( self :Tuple ) -> List[str]:
UpperCAmelCase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase__ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def UpperCAmelCase_ ( self :Optional[Any] ) -> Union[str, Any]:
# fmt: off
UpperCAmelCase__ = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="facebook/xglm-564M" , padding=lowerCamelCase , )
| 169
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = """char"""
UpperCAmelCase_ = """bpe"""
UpperCAmelCase_ = """wp"""
_lowerCAmelCase : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = ["""image_processor""", """char_tokenizer"""]
UpperCAmelCase_ = """ViTImageProcessor"""
UpperCAmelCase_ = """MgpstrTokenizer"""
def __init__( self :List[str] , lowerCamelCase :Dict=None , lowerCamelCase :Optional[int]=None , **lowerCamelCase :List[str] ) -> Optional[Any]:
UpperCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase , )
UpperCAmelCase__ = kwargs.pop("feature_extractor" )
UpperCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
UpperCAmelCase__ = tokenizer
UpperCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
UpperCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self :Optional[Any] , lowerCamelCase :List[str]=None , lowerCamelCase :Any=None , lowerCamelCase :Optional[Any]=None , **lowerCamelCase :Optional[int] ) -> Union[str, Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
UpperCAmelCase__ = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is not None:
UpperCAmelCase__ = self.char_tokenizer(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Optional[Any] ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = sequences
UpperCAmelCase__ = char_preds.size(0 )
UpperCAmelCase__ , UpperCAmelCase__ = self._decode_helper(lowerCamelCase , "char" )
UpperCAmelCase__ , UpperCAmelCase__ = self._decode_helper(lowerCamelCase , "bpe" )
UpperCAmelCase__ , UpperCAmelCase__ = self._decode_helper(lowerCamelCase , "wp" )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for i in range(lowerCamelCase ):
UpperCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCAmelCase__ = scores.index(max(lowerCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCAmelCase__ = {}
UpperCAmelCase__ = final_strs
UpperCAmelCase__ = final_scores
UpperCAmelCase__ = char_strs
UpperCAmelCase__ = bpe_strs
UpperCAmelCase__ = wp_strs
return out
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :int , lowerCamelCase :List[str] ) -> Union[str, Any]:
if format == DecodeType.CHARACTER:
UpperCAmelCase__ = self.char_decode
UpperCAmelCase__ = 1
UpperCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
UpperCAmelCase__ = self.bpe_decode
UpperCAmelCase__ = 2
UpperCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
UpperCAmelCase__ = self.wp_decode
UpperCAmelCase__ = 102
UpperCAmelCase__ = "[SEP]"
else:
raise ValueError(f'''Format {format} is not supported.''' )
UpperCAmelCase__ , UpperCAmelCase__ = [], []
UpperCAmelCase__ = pred_logits.size(0 )
UpperCAmelCase__ = pred_logits.size(1 )
UpperCAmelCase__ , UpperCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase , sorted=lowerCamelCase )
UpperCAmelCase__ = preds_index.view(-1 , lowerCamelCase )[:, 1:]
UpperCAmelCase__ = decoder(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = torch.nn.functional.softmax(lowerCamelCase , dim=2 ).max(dim=2 )
UpperCAmelCase__ = preds_max_prob[:, 1:]
for index in range(lowerCamelCase ):
UpperCAmelCase__ = preds_str[index].find(lowerCamelCase )
UpperCAmelCase__ = preds_str[index][:pred_eos]
UpperCAmelCase__ = preds_index[index].cpu().tolist()
UpperCAmelCase__ = pred_index.index(lowerCamelCase ) if eos_token in pred_index else -1
UpperCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
UpperCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCamelCase )
conf_scores.append(lowerCamelCase )
return dec_strs, conf_scores
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :int ) -> List[str]:
UpperCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase )]
return decode_strs
def UpperCAmelCase_ ( self :Optional[Any] , lowerCamelCase :Dict ) -> Dict:
return self.bpe_tokenizer.batch_decode(lowerCamelCase )
def UpperCAmelCase_ ( self :str , lowerCamelCase :str ) -> Tuple:
UpperCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase )]
return decode_strs
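# A minimal usage sketch (the processor class and checkpoint names follow the
# upstream MGP-STR release and are assumptions here):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   logits = model(pixel_values).logits        # the (char, bpe, wp) logit triple
#   decoded = processor.batch_decode(logits)   # keeps the highest-confidence head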
| 169
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""PerceiverFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
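# The pattern above keeps `import transformers` cheap: at runtime the module is
# replaced by a _LazyModule that resolves names from _import_structure only on
# first attribute access, so for example:
#
#   from transformers.models.perceiver import PerceiverConfig  # no torch needed
#   from transformers.models.perceiver import PerceiverModel   # triggers the torch-backed import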
| 363
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88
| 0
|
"""simple docstring"""
A: List[str] = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A: Any = [{"type": "code", "content": INSTALL_CONTENT}]
A: Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 109
|
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]
def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )
def to_complex_case(str_: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(str_)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"
# the wrapper names below are inferred from the separator/casing each produces
def to_pascal_case(str_: str) -> str:
    return to_simple_case(str_)
def to_camel_case(str_: str) -> str:
    try:
        res_str = to_simple_case(str_)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(str_: str, upper: bool) -> str:
    return to_complex_case(str_, upper, "_")
def to_kebab_case(str_: str, upper: bool) -> str:
    return to_complex_case(str_, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
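# Example behaviour of the helpers above:
#   to_pascal_case("one two three")        -> "OneTwoThree"
#   to_camel_case("one two three")         -> "oneTwoThree"
#   to_snake_case("one two three", False)  -> "one_two_three"
#   to_kebab_case("One Two Three", True)   -> "ONE-TWO-THREE"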
| 152
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            # round-trip the scheduler state through disk halfway through
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) ->List[str]:
"""simple docstring"""
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertAlmostEqual(UpperCAmelCase_ , UpperCAmelCase_ , delta=UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase_ )
snake_case_ = torch.tensor([0.4, 0.2, -0.5] )
snake_case_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
snake_case_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
snake_case_ = criterion(UpperCAmelCase_ , UpperCAmelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase_ )
snake_case_ = torch.tensor([0.4, 0.2, -0.5] )
snake_case_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
snake_case_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase_ , weight_decay=0.0 , relative_step=UpperCAmelCase_ , scale_parameter=UpperCAmelCase_ , warmup_init=UpperCAmelCase_ , )
for _ in range(1_000 ):
snake_case_ = criterion(UpperCAmelCase_ , UpperCAmelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
__lowercase: int = nn.Linear(50 , 50) if is_torch_available() else None
__lowercase: Union[str, Any] = AdamW(m.parameters() , lr=1_0.0) if is_torch_available() else None
__lowercase: str = 10
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any=None ) ->List[str]:
"""simple docstring"""
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertAlmostEqual(UpperCAmelCase_ , UpperCAmelCase_ , delta=UpperCAmelCase_ , msg=UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
snake_case_ = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
snake_case_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
snake_case_ , snake_case_ = data
snake_case_ = scheduler_func(self.optimizer , **UpperCAmelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
snake_case_ = unwrap_schedule(UpperCAmelCase_ , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase_ , UpperCAmelCase_ , tol=1E-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , )
snake_case_ = scheduler_func(self.optimizer , **UpperCAmelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase_ ) # wrap to test picklability of the schedule
snake_case_ = unwrap_and_save_reload_schedule(UpperCAmelCase_ , self.num_steps )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ , msg=F"""failed for {scheduler_func} in save and reload""" )
class LambdaScheduleWrapper:
    """Pickleable wrapper around a schedule's lr lambdas."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
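For intuition on the expected values in the table above, here is a minimal sketch of the constant-with-warmup lambda arithmetic (illustrative only, not the transformers implementation):

def constant_with_warmup_lambda(step: int, num_warmup_steps: int) -> float:
    # linear ramp from 0 to 1 over the warmup steps, then constant
    if step < num_warmup_steps:
        return step / max(1.0, num_warmup_steps)
    return 1.0


base_lr = 10.0
print([base_lr * constant_with_warmup_lambda(s, 4) for s in range(10)])
# [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0] -- matches the expected list above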
| 233
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you better save it.]")
if __name__ == "__main__":
main()
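A short usage sketch of the functions above; since the output is random, the checks encode properties rather than exact values:

example = password_generator(12)
assert len(example) == 12

assert is_strong_password("Aa1!aaaa") is True   # upper, lower, digit, and special char
assert is_strong_password("weakpass") is False  # no uppercase, digit, or special char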
| 233
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case = ShapEImgaImgPipeline
_snake_case = ['image']
_snake_case = ['image']
_snake_case = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
_snake_case = False
@property
def A__ ( self )-> Any:
'''simple docstring'''
return 32
@property
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
return 32
@property
def A__ ( self )-> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self )-> Any:
'''simple docstring'''
return 8
@property
def A__ ( self )-> Any:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__UpperCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE_ )
return model
@property
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ , do_resize=SCREAMING_SNAKE_CASE_ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
@property
def A__ ( self )-> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__UpperCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def A__ ( self )-> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__UpperCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def A__ ( self )-> str:
'''simple docstring'''
__UpperCamelCase = self.dummy_prior
__UpperCamelCase = self.dummy_image_encoder
__UpperCamelCase = self.dummy_image_processor
__UpperCamelCase = self.dummy_renderer
__UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__UpperCamelCase = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
__UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = '''cpu'''
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = output.images[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self )-> str:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = torch_device == '''cpu'''
__UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = 1
__UpperCamelCase = 2
__UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__UpperCamelCase = batch_size * [inputs[key]]
__UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self )-> List[Any]:
'''simple docstring'''
__UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
__UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
__UpperCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
__UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__UpperCamelCase = pipe(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 328
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = crop_pct
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCamelCase = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCamelCase = int(size['''height'''] / crop_pct )
else:
__UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
else:
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
    raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
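To make the `crop_pct` branch above concrete: the image is first resized past the target so that the subsequent center crop lands exactly on the requested size. A small arithmetic sketch:

# size={"shortest_edge": 224} with crop_pct=0.9: resize the shortest edge to
# int(224 / 0.9) = 248, then center-crop back down to 224.
size, crop_pct = 224, 0.9
assert int(size / crop_pct) == 248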
| 328
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Optional[int] = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """unispeech-sat"""
def __init__( self : List[Any] , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : str=7_6_8 , __UpperCamelCase : Optional[Any]=1_2 , __UpperCamelCase : str=1_2 , __UpperCamelCase : Optional[int]=3_0_7_2 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : Dict=1e-5 , __UpperCamelCase : Tuple="group" , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCamelCase : int=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=1_2_8 , __UpperCamelCase : str=1_6 , __UpperCamelCase : Any=False , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=0.0_5 , __UpperCamelCase : int=1_0 , __UpperCamelCase : int=2 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Dict=1_0 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : str=3_2_0 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Union[str, Any]=1_0_0 , __UpperCamelCase : List[str]=2_5_6 , __UpperCamelCase : Optional[int]=2_5_6 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Optional[int]="mean" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : int=False , __UpperCamelCase : Dict=2_5_6 , __UpperCamelCase : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __UpperCamelCase : str=(5, 3, 3, 1, 1) , __UpperCamelCase : Any=(1, 2, 3, 1, 1) , __UpperCamelCase : Union[str, Any]=5_1_2 , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Dict=5_0_4 , **__UpperCamelCase : str , )->int:
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
_UpperCAmelCase = num_clusters
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = feat_quantizer_dropout
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = xvector_output_dim
@property
def lowercase__ ( self : int )->Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
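The final property multiplies the conv strides into the model's overall downsampling factor (named `inputs_to_logits_ratio` in upstream UniSpeechSat; the name is obscured here). With the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)`:

import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320  # 5 * 2**6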
| 326
|
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert the binary strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to the first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
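A worked instance of the reflect-and-prefix recursion above:

# 2 bits: take ["0", "1"], prefix "0" on the first half and "1" on the mirrored half:
# ["00", "01"] + ["11", "10"] -> [0, 1, 3, 2] as integers.
assert gray_code(2) == [0, 1, 3, 2]
assert gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]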
| 326
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "xglm"
lowercase__ = ["past_key_values"]
lowercase__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self: Dict, a_: int=256_008, a_: List[str]=2_048, a_: Dict=1_024, a_: int=4_096, a_: List[Any]=24, a_: Any=16, a_: Dict="gelu", a_: Optional[Any]=0.1, a_: str=0.1, a_: Union[str, Any]=0.0, a_: List[str]=0.0, a_: List[Any]=0.02, a_: Dict=True, a_: int=True, a_: List[Any]=2, a_: str=1, a_: Optional[int]=0, a_: Tuple=2, **a_: Tuple, ):
'''simple docstring'''
_snake_case : Union[str, Any] = vocab_size
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Union[str, Any] = d_model
_snake_case : Optional[int] = ffn_dim
_snake_case : List[Any] = num_layers
_snake_case : int = attention_heads
_snake_case : int = activation_function
_snake_case : List[str] = dropout
_snake_case : List[Any] = attention_dropout
_snake_case : Any = activation_dropout
_snake_case : Union[str, Any] = layerdrop
_snake_case : int = init_std
_snake_case : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case : Union[str, Any] = use_cache
super().__init__(
pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, **a_, )
| 64
|
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
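A hedged example invocation (the script filename, paths, and base model name below are placeholders, not values from the source):

# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model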
| 164
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
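The `_LazyModule` indirection defers the heavy torch import until an attribute is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); this is an illustration, not the transformers implementation:

import importlib

_lazy_attrs = {"TimmBackbone": "modeling_timm_backbone"}

def __getattr__(name):
    # Import the submodule on first attribute access, then hand back the attribute.
    if name in _lazy_attrs:
        module = importlib.import_module(f".{_lazy_attrs[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")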
| 266
|
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg)
    response = requests.get(
        f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''',
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 266
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
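A quick REPL check of the checkpoint regex defined above:

import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
sample = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(pattern.findall(sample))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]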
| 95
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
class a__ ( a_ ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase = []
def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ):
lowercase : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
lowercase : Optional[Any] = vocab_file
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __magic_name__ ( self ):
return self.sp_model.get_piece_size()
def __magic_name__ ( self ):
lowercase : str = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowercase : Union[str, Any] = self.__dict__.copy()
lowercase : Dict = None
return state
def __setstate__( self , _a ):
lowercase : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase : List[str] = {}
lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self , _a ):
return self.sp_model.encode(_a , out_type=_a )
def __magic_name__ ( self , _a ):
return self.sp_model.piece_to_id(_a )
def __magic_name__ ( self , _a ):
lowercase : Union[str, Any] = self.sp_model.IdToPiece(_a )
return token
def __magic_name__ ( self , _a ):
lowercase : List[Any] = []
lowercase : List[Any] = ""
lowercase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
lowercase : int = True
lowercase : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
lowercase : int = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __magic_name__ ( self , _a , _a = False , _a = None , _a = True , **_a , ):
lowercase : int = kwargs.pop("use_source_tokenizer" , _a )
lowercase : Union[str, Any] = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowercase : Any = []
lowercase : Dict = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
lowercase : int = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowercase : Dict = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(_a ) )
else:
lowercase : Union[str, Any] = "".join(_a )
lowercase : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowercase : Union[str, Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def __magic_name__ ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : List[Any] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
lowercase : int = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def __magic_name__ ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : List[str] = [self.cls_token_id]
lowercase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__ ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def __magic_name__ ( self , _a , _a = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 202
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print("""\n""".join(upper_files) + """\n""")

space_files = [file for file in filepaths if """ """ in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print("""\n""".join(space_files) + """\n""")

hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print("""\n""".join(hyphen_files) + """\n""")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print("""\n""".join(nodir_files) + """\n""")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 358
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number; round to digit_amount places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
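For reference, expected outputs for the demo calls above (modulo binary float representation):

# decimal_isolate(1.53, 0)    -> 0.53
# decimal_isolate(35.345, 1)  -> 0.3
# decimal_isolate(35.345, 2)  -> 0.34
# decimal_isolate(35.345, 3)  -> 0.345
# decimal_isolate(-14.789, 3) -> -0.789
# decimal_isolate(0, 2)       -> 0
# decimal_isolate(-14.123, 1) -> -0.1
# decimal_isolate(-14.123, 2) -> -0.12
# decimal_isolate(-14.123, 3) -> -0.123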
| 45
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Find the minimal path sum from the top left to the bottom right of the grid,
    moving only right and down (Project Euler problem 81).
    """
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
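A tiny worked instance of the recurrence (grid values are illustrative, not from matrix.txt):

# grid = [[1, 3],
#         [2, 4]]
# dp[0] = [1, 4]                          top row prefix sums
# dp[1][0] = 1 + 2 = 3                    first column prefix sums
# dp[1][1] = 4 + min(dp[0][1], dp[1][0]) = 4 + 3 = 7   -> minimal path 1 -> 2 -> 4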
| 55
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class UpperCamelCase_ ( SCREAMING_SNAKE_CASE__ ):
_A : str = ['''input_features''', '''attention_mask''']
def __init__( self , snake_case__=80 , snake_case__=1_60_00 , snake_case__=80 , snake_case__=0.0 , snake_case__=True , snake_case__=True , snake_case__=True , **snake_case__ , ) -> str:
"""simple docstring"""
super().__init__(feature_size=A__ , sampling_rate=A__ , padding_value=A__ , **A__ )
UpperCAmelCase = num_mel_bins
UpperCAmelCase = do_ceptral_normalize
UpperCAmelCase = normalize_means
UpperCAmelCase = normalize_vars
UpperCAmelCase = True
def UpperCamelCase_ ( self , snake_case__ , ) -> str:
"""simple docstring"""
UpperCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCAmelCase = torch.from_numpy(A__ ).unsqueeze(0 )
UpperCAmelCase = ta_kaldi.fbank(A__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def UpperCamelCase_ ( snake_case__ , snake_case__ , snake_case__ = True , snake_case__ = True , snake_case__ = 0.0 , ) -> Tuple:
"""simple docstring"""
if normalize_means:
UpperCAmelCase = x[:input_length].mean(axis=0 )
UpperCAmelCase = np.subtract(A__ , A__ )
if normalize_vars:
UpperCAmelCase = x[:input_length].std(axis=0 )
UpperCAmelCase = np.divide(A__ , A__ )
if input_length < x.shape[0]:
UpperCAmelCase = padding_value
# make sure array is in float32
UpperCAmelCase = x.astype(np.floataa )
return x
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(A__ , A__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(A__ , A__ )
]
def __call__( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , **snake_case__ , ) -> Optional[int]:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase = isinstance(A__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase = is_batched_numpy or (
isinstance(A__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase = [np.asarray(A__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A__ , np.ndarray ):
UpperCAmelCase = np.asarray(A__ , dtype=np.floataa )
elif isinstance(A__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase = [raw_speech]
# extract fbank features
UpperCAmelCase = [self._extract_fbank_features(A__ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase = BatchFeature({"""input_features""": features} )
UpperCAmelCase = self.pad(
A__ , padding=A__ , max_length=A__ , truncation=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , **A__ , )
# make sure list is in array format
UpperCAmelCase = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , A__ ):
UpperCAmelCase = [np.asarray(A__ , dtype=np.floataa ) for feature in input_features]
UpperCAmelCase = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCAmelCase = [np.asarray(A__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCAmelCase = (
np.array(A__ , dtype=np.intaa )
if self._get_padding_strategies(A__ , max_length=A__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCAmelCase = self.normalize(
padded_inputs["""input_features"""] , attention_mask=A__ )
if return_tensors is not None:
UpperCAmelCase = padded_inputs.convert_to_tensors(A__ )
return padded_inputs
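A minimal numpy sketch of the utterance-level CMVN performed above (zero mean, unit variance per mel bin over the valid frames); variable names are illustrative:

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
cmvn = (x - x.mean(axis=0)) / x.std(axis=0)
print(cmvn[:, 0])  # [-1.2247449  0.  1.2247449] -- zero mean, unit variance per feature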
| 359
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class UpperCamelCase_ ( a_ ):
_A : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_A : ClassVar[Features] = Features({'image': Image()} )
_A : ClassVar[Features] = Features({'labels': ClassLabel} )
_A : str = "image"
_A : str = "labels"
def UpperCamelCase_ ( self , snake_case__ ) -> List[str]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 248
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = size if size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__, param_name='crop_size')
snake_case_ = do_resize
snake_case_ = do_rescale
snake_case_ = do_normalize
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = size
snake_case_ = resample
snake_case_ = rescale_factor
snake_case_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "shortest_edge" in size:
snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=size['shortest_edge'], default_to_square=lowerCAmelCase__)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
snake_case_ = (size['height'], size['width'])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}')
return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> BatchFeature:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size', default_to_square=lowerCAmelCase__)
snake_case_ = resample if resample is not None else self.resample
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCAmelCase__)
if not is_batched(lowerCAmelCase__):
snake_case_ = [images]
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
snake_case_ = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(image=lowerCAmelCase__, size=lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
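# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file): the preprocess method above
# applies resize -> center-crop -> rescale -> normalize, in that order. The
# guarded block below mocks that order with NumPy; the 224x224 target and the
# slice standing in for resize+crop are made up, while IMAGENET_DEFAULT_MEAN
# and IMAGENET_DEFAULT_STD are the module-level constants used above.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    import numpy as np

    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, size=(256, 256, 3), dtype=np.uint8)
    image = image[16:240, 16:240]             # stand-in for resize + center-crop
    image = image.astype(np.float32) / 255.0  # rescale_factor = 1 / 255
    image = (image - np.array(IMAGENET_DEFAULT_MEAN)) / np.array(IMAGENET_DEFAULT_STD)
    print(image.shape)                        # (224, 224, 3)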
| 69
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''T5Config'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "mt5"
SCREAMING_SNAKE_CASE_ = MTaConfig
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "mt5"
SCREAMING_SNAKE_CASE_ = MTaConfig
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "mt5"
SCREAMING_SNAKE_CASE_ = MTaConfig
| 69
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCAmelCase__ ( lowerCamelCase : int ):
_A : str = SwinConfig(image_size=192 )
if "base" in model_name:
_A : int = 6
_A : str = 128
_A : Optional[int] = (2, 2, 18, 2)
_A : Union[str, Any] = (4, 8, 16, 32)
elif "large" in model_name:
_A : Tuple = 12
_A : int = 192
_A : str = (2, 2, 18, 2)
_A : Tuple = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
_A : Tuple = window_size
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Optional[int] = num_heads
return config
def lowerCAmelCase__ ( lowerCamelCase : Optional[Any] ):
if "encoder.mask_token" in name:
_A : Union[str, Any] = name.replace('encoder.mask_token' ,'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
_A : Tuple = name.replace('encoder.patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
_A : List[str] = name.replace('encoder.patch_embed.norm' ,'embeddings.norm' )
if "attn.proj" in name:
_A : Any = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name:
_A : Optional[int] = name.replace('attn' ,'attention.self' )
if "norm1" in name:
_A : Optional[int] = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
_A : Dict = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
_A : List[Any] = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
_A : Optional[Any] = name.replace('mlp.fc2' ,'output.dense' )
if name == "encoder.norm.weight":
_A : List[str] = 'layernorm.weight'
if name == "encoder.norm.bias":
_A : Union[str, Any] = 'layernorm.bias'
if "decoder" in name:
pass
else:
_A : Optional[Any] = 'swin.' + name
return name
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : Union[str, Any] ):
for key in orig_state_dict.copy().keys():
_A : Optional[int] = orig_state_dict.pop(lowerCamelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
_A : Optional[int] = key.split('.' )
_A : int = int(key_split[2] )
_A : List[Any] = int(key_split[4] )
_A : Union[str, Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_A : str = val[:dim, :]
_A : str = val[dim : dim * 2, :]
_A : List[Any] = val[-dim:, :]
else:
_A : Optional[int] = val[:dim]
_A : Tuple = val[dim : dim * 2]
_A : Union[str, Any] = val[-dim:]
else:
_A : int = val
return orig_state_dict
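# Worked example (illustrative, not from the original script) of the fused-qkv
# split above: with all_head_size = dim = 4, a fused qkv weight of shape
# (12, 4) is cut row-wise into
#   query = val[:4, :]    -> rows 0-3
#   key   = val[4:8, :]   -> rows 4-7
#   value = val[-4:, :]   -> rows 8-11
# and the matching qkv bias of length 12 is split the same way.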
def lowerCAmelCase__ ( lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ,lowerCamelCase : List[Any] ):
_A : List[str] = torch.load(lowerCamelCase ,map_location='cpu' )['model']
_A : str = get_swin_config(lowerCamelCase )
_A : List[str] = SwinForMaskedImageModeling(lowerCamelCase )
model.eval()
_A : Any = convert_state_dict(lowerCamelCase ,lowerCamelCase )
model.load_state_dict(lowerCamelCase )
_A : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_A : Union[str, Any] = ViTImageProcessor(size={'height': 192, 'width': 192} )
_A : int = Image.open(requests.get(lowerCamelCase ,stream=lowerCamelCase ).raw )
_A : List[str] = image_processor(images=lowerCamelCase ,return_tensors='pt' )
with torch.no_grad():
_A : List[Any] = model(**lowerCamelCase ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(F'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(F'microsoft/{model_name}' )
image_processor.push_to_hub(F'microsoft/{model_name}' )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A : Dict = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 227
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a = StableDiffusionControlNetImgaImgPipeline
a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A ( self : Tuple):
torch.manual_seed(0)
_A : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0)
_A : Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0)
_A : Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
torch.manual_seed(0)
_A : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
_A : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A : Any = CLIPTextModel(SCREAMING_SNAKE_CASE)
_A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_A : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=0):
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
_A : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE)
else:
_A : Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = 2
_A : Tuple = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE , device=torch.device(SCREAMING_SNAKE_CASE) , )
_A : Tuple = floats_tensor(control_image.shape , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1)[0]
_A : Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE)).convert('RGB').resize((64, 64))
_A : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def A ( self : str):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A ( self : Tuple):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def A ( self : int):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __lowerCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a = StableDiffusionControlNetImgaImgPipeline
a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def A ( self : List[str]):
torch.manual_seed(0)
_A : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0)
def init_weights(m: Union[str, Any]):
if isinstance(m , torch.nn.Convad):
torch.nn.init.normal_(m.weight)
m.bias.data.fill_(1.0)
_A : int = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE)
torch.manual_seed(0)
_A : str = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE)
torch.manual_seed(0)
_A : List[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
torch.manual_seed(0)
_A : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
_A : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE)
_A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_A : List[str] = MultiControlNetModel([controlneta, controlneta])
_A : List[str] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str]=0):
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
_A : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE)
else:
_A : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = 2
_A : List[str] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE , device=torch.device(SCREAMING_SNAKE_CASE) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE , device=torch.device(SCREAMING_SNAKE_CASE) , ),
]
_A : str = floats_tensor(control_image[0].shape , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE)
_A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1)[0]
_A : Optional[int] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE)).convert('RGB').resize((64, 64))
_A : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def A ( self : Tuple):
_A : List[str] = self.get_dummy_components()
_A : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE)
pipe.to(SCREAMING_SNAKE_CASE)
_A : int = 10.0
_A : Union[str, Any] = 4
_A : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE)
_A : List[Any] = steps
_A : List[str] = scale
_A : int = pipe(**SCREAMING_SNAKE_CASE)[0]
_A : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = steps
_A : Any = scale
_A : Dict = pipe(**SCREAMING_SNAKE_CASE , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
_A : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE)
_A : str = steps
_A : List[Any] = scale
_A : int = pipe(**SCREAMING_SNAKE_CASE , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
_A : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE)
_A : Tuple = steps
_A : Tuple = scale
_A : str = pipe(**SCREAMING_SNAKE_CASE , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a)) > 1e-3
assert np.sum(np.abs(output_a - output_a)) > 1e-3
assert np.sum(np.abs(output_a - output_a)) > 1e-3
def A ( self : Optional[Any]):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A ( self : Any):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def A ( self : Dict):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
def A ( self : str):
_A : Optional[int] = self.get_dummy_components()
_A : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE)
pipe.to(SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(SCREAMING_SNAKE_CASE)
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[Any]):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any):
_A : Dict = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')
_A : List[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE , controlnet=SCREAMING_SNAKE_CASE)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_A : List[Any] = torch.Generator(device='cpu').manual_seed(0)
_A : List[Any] = 'evil space-punk bird'
_A : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512))
_A : List[str] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512))
_A : Dict = pipe(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , control_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type='np' , num_inference_steps=50 , strength=0.6 , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
_A : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy')
assert np.abs(expected_image - image).max() < 9e-2
| 227
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__UpperCamelCase : int = """MobileNetV1Config"""
# Base docstring
__UpperCamelCase : str = """google/mobilenet_v1_1.0_224"""
__UpperCamelCase : str = [1, 1024, 7, 7]
# Image classification docstring
__UpperCamelCase : Union[str, Any] = """google/mobilenet_v1_1.0_224"""
__UpperCamelCase : Optional[Any] = """tabby, tabby cat"""
__UpperCamelCase : Union[str, Any] = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def a_ ( _A , _A , _A=None ) -> List[Any]:
"""simple docstring"""
snake_case__ = {}
if isinstance(_A , _A ):
snake_case__ = model.mobilenet_va
else:
snake_case__ = model
snake_case__ = 'MobilenetV1/Conv2d_0/'
snake_case__ = backbone.conv_stem.convolution.weight
snake_case__ = backbone.conv_stem.normalization.bias
snake_case__ = backbone.conv_stem.normalization.weight
snake_case__ = backbone.conv_stem.normalization.running_mean
snake_case__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
snake_case__ = i + 1
snake_case__ = i * 2
snake_case__ = backbone.layer[pt_index]
snake_case__ = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
snake_case__ = pointer.convolution.weight
snake_case__ = pointer.normalization.bias
snake_case__ = pointer.normalization.weight
snake_case__ = pointer.normalization.running_mean
snake_case__ = pointer.normalization.running_var
snake_case__ = backbone.layer[pt_index + 1]
snake_case__ = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
snake_case__ = pointer.convolution.weight
snake_case__ = pointer.normalization.bias
snake_case__ = pointer.normalization.weight
snake_case__ = pointer.normalization.running_mean
snake_case__ = pointer.normalization.running_var
if isinstance(_A , _A ):
snake_case__ = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
snake_case__ = model.classifier.weight
snake_case__ = model.classifier.bias
return tf_to_pt_map
def a_ ( _A , _A , _A ) -> Tuple:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
snake_case__ = tf.train.list_variables(_A )
snake_case__ = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
snake_case__ = tf.train.load_variable(_A , _A )
snake_case__ = array
# Build TF to PyTorch weights loading map
snake_case__ = _build_tf_to_pytorch_map(_A , _A , _A )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
snake_case__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
snake_case__ = np.transpose(_A , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
snake_case__ = array.squeeze().transpose()
else:
snake_case__ = np.transpose(_A , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
snake_case__ = torch.from_numpy(_A )
tf_weights.pop(_A , _A )
tf_weights.pop(name + '/RMSProp' , _A )
tf_weights.pop(name + '/RMSProp_1' , _A )
tf_weights.pop(name + '/ExponentialMovingAverage' , _A )
logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def a_ ( _A , _A ) -> torch.Tensor:
"""simple docstring"""
snake_case__ , snake_case__ = features.shape[-2:]
snake_case__ , snake_case__ = conv_layer.stride
snake_case__ , snake_case__ = conv_layer.kernel_size
if in_height % stride_height == 0:
snake_case__ = max(kernel_height - stride_height , 0 )
else:
snake_case__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
snake_case__ = max(kernel_width - stride_width , 0 )
else:
snake_case__ = max(kernel_width - (in_width % stride_width) , 0 )
snake_case__ = pad_along_width // 2
snake_case__ = pad_along_width - pad_left
snake_case__ = pad_along_height // 2
snake_case__ = pad_along_height - pad_top
snake_case__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_A , _A , 'constant' , 0.0 )
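# Worked example (illustrative, not from the original file) of the "SAME"
# padding computed above: for in_height = 7, stride_height = 2 and
# kernel_height = 3, 7 % 2 == 1, so
#   pad_along_height = max(3 - (7 % 2), 0) = 2,
#   pad_top = 2 // 2 = 1,  pad_bottom = 2 - 1 = 1,
# giving an output height of (7 + 2 - 3) // 2 + 1 = 4 = ceil(7 / 2), which
# matches TensorFlow's SAME behaviour.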
class __SCREAMING_SNAKE_CASE( nn.Module ):
def __init__( self: Dict , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ) -> None:
super().__init__()
snake_case__ = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
snake_case__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
snake_case__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode='zeros' , )
if use_normalization:
snake_case__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
snake_case__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
snake_case__ = ACTaFN[config.hidden_act]
else:
snake_case__ = config.hidden_act
else:
snake_case__ = None
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: torch.Tensor ) -> torch.Tensor:
if self.config.tf_padding:
snake_case__ = apply_tf_padding(UpperCamelCase , self.convolution )
snake_case__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
snake_case__ = self.normalization(UpperCamelCase )
if self.activation is not None:
snake_case__ = self.activation(UpperCamelCase )
return features
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = MobileNetVaConfig
_UpperCAmelCase = load_tf_weights_in_mobilenet_va
_UpperCAmelCase = "mobilenet_v1"
_UpperCAmelCase = "pixel_values"
_UpperCAmelCase = False
def lowerCAmelCase_ ( self: str , UpperCamelCase: Union[nn.Linear, nn.Convad] ) -> None:
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__UpperCamelCase : Union[str, Any] = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__UpperCamelCase : str = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , a_ , )
class __SCREAMING_SNAKE_CASE( a_ ):
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ) -> Tuple:
super().__init__(UpperCamelCase )
snake_case__ = config
snake_case__ = 32
snake_case__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
snake_case__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
snake_case__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
snake_case__ = nn.ModuleList()
for i in range(13 ):
snake_case__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
snake_case__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
snake_case__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict ) -> Union[str, Any]:
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
snake_case__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case__ = self.conv_stem(UpperCamelCase )
snake_case__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
snake_case__ = layer_module(UpperCamelCase )
if output_hidden_states:
snake_case__ = all_hidden_states + (hidden_states,)
snake_case__ = hidden_states
if self.pooler is not None:
snake_case__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
snake_case__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a_ , )
class __SCREAMING_SNAKE_CASE( a_ ):
def __init__( self: List[Any] , UpperCamelCase: MobileNetVaConfig ) -> None:
super().__init__(UpperCamelCase )
snake_case__ = config.num_labels
snake_case__ = MobileNetVaModel(UpperCamelCase )
snake_case__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
snake_case__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
snake_case__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
snake_case__ = outputs.pooler_output if return_dict else outputs[1]
snake_case__ = self.classifier(self.dropout(UpperCamelCase ) )
snake_case__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case__ = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case__ = 'single_label_classification'
else:
snake_case__ = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case__ = MSELoss()
if self.num_labels == 1:
snake_case__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
snake_case__ = CrossEntropyLoss()
snake_case__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case__ = BCEWithLogitsLoss()
snake_case__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
snake_case__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
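# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; the dump renames the
# classes above). Assuming the standard Transformers names, classification
# with the pretrained checkpoint referenced in the docstrings looks like:
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted = model(**inputs).logits.argmax(-1).item()
# ---------------------------------------------------------------------------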
| 307
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
__UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case__ = cn.convert_to_negative(_A )
# assert negative_img array for at least one True
assert negative_img.any()
def a_ ( ) -> int:
"""simple docstring"""
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_A , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def a_ ( ) -> List[str]:
"""simple docstring"""
snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
snake_case__ = canny.canny(_A )
# assert canny array for at least one True
assert canny_array.any()
def a_ ( ) -> Optional[int]:
"""simple docstring"""
assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all()
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
# laplace diagonals
snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
snake_case__ = conv.img_convolve(_A , _A ).astype(_A )
assert res.any()
def a_ ( ) -> Dict:
"""simple docstring"""
assert med.median_filter(_A , 3 ).any()
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case__ , snake_case__ = sob.sobel_filter(_A )
assert grad.any() and theta.any()
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ = sp.make_sepia(_A , 20 )
assert sepia.all()
def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]:
"""simple docstring"""
snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]:
"""simple docstring"""
snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def a_ ( ) -> Any:
"""simple docstring"""
snake_case__ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
snake_case__ = imread(_A , 0 )
# Test for get_neighbors_pixel function() return not None
snake_case__ = 0
snake_case__ = 0
snake_case__ = image[x_coordinate][y_coordinate]
snake_case__ = lbp.get_neighbors_pixel(
_A , _A , _A , _A )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
snake_case__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
snake_case__ = lbp.local_binary_value(_A , _A , _A )
assert lbp_image.any()
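# Illustrative invocation (not part of the original file): every check above
# is a plain assert, so the suite runs under pytest, e.g.
#
#   python -m pytest digital_image_processing/test_digital_image_processing.py
#
# The test-file path is an assumption inferred from the package layout
# imported at the top of this module.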
| 307
| 1
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : int , A__ : int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
lowerCAmelCase_ : Union[str, Any] = str(bin(A__ ) )
binary_number += "0" * shift_amount
return binary_number
def UpperCamelCase_ ( A__ : int , A__ : int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
lowerCAmelCase_ : List[str] = str(bin(A__ ) )[2:]
if shift_amount >= len(A__ ):
return "0b0"
lowerCAmelCase_ : Union[str, Any] = binary_number[: len(A__ ) - shift_amount]
return "0b" + shifted_binary_number
def UpperCamelCase_ ( A__ : int , A__ : int ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
lowerCAmelCase_ : str = """0""" + str(bin(A__ ) ).strip("""-""" )[2:]
else: # Get binary (2's complement) representation of negative number
lowerCAmelCase_ : List[Any] = len(bin(A__ )[3:] ) # Find 2's complement of number
lowerCAmelCase_ : List[Any] = bin(abs(A__ ) - (1 << binary_number_length) )[3:]
lowerCAmelCase_ : Dict = (
"""1""" + """0""" * (binary_number_length - len(A__ )) + binary_number
)
if shift_amount >= len(A__ ):
return "0b" + binary_number[0] * len(A__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(A__ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
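# Worked examples (illustrative; the three shift functions above are shown
# with collapsed names in this dump):
#   logical left shift of 5 by 2:  bin(5) = "0b101" -> "0b10100" (= 20)
#   logical right shift of 5 by 2: "101" -> drop 2 bits -> "0b1" (= 1)
#   arithmetic right shift of -8 by 2: two's complement "11000"
#       -> sign-extend two bits -> "0b11110" (= -2), matching -8 >> 2.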
| 89
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Tuple=False ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCamelCase_ ( A__ : Any , A__ : Any , A__ : Tuple=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase_ : Optional[Any] = """"""
else:
lowerCAmelCase_ : Optional[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : Dict = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase_ : List[Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Union[str, Any] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
lowerCAmelCase_ : List[str] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
lowerCAmelCase_ : Any = in_proj_weight[-config.hidden_size :, :]
lowerCAmelCase_ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_ ( A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : Dict = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def UpperCamelCase_ ( A__ : List[Any] , A__ : Optional[Any] , A__ : Dict ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = dct.pop(A__ )
lowerCAmelCase_ : Tuple = val
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : Optional[int] = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = ViTConfig()
lowerCAmelCase_ : Any = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Tuple = int(vit_name[-12:-10] )
lowerCAmelCase_ : Optional[int] = int(vit_name[-9:-6] )
else:
lowerCAmelCase_ : Optional[int] = 10_00
lowerCAmelCase_ : Tuple = """huggingface/label-files"""
lowerCAmelCase_ : Any = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : Dict = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : Union[str, Any] = {int(A__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Union[str, Any] = idalabel
lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = int(vit_name[-6:-4] )
lowerCAmelCase_ : Dict = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
lowerCAmelCase_ : int = 1_92
lowerCAmelCase_ : List[str] = 7_68
lowerCAmelCase_ : List[str] = 12
lowerCAmelCase_ : int = 3
elif vit_name[9:].startswith("""small""" ):
lowerCAmelCase_ : Optional[Any] = 3_84
lowerCAmelCase_ : Optional[int] = 15_36
lowerCAmelCase_ : Dict = 12
lowerCAmelCase_ : str = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
lowerCAmelCase_ : Tuple = 7_68
lowerCAmelCase_ : Any = 23_04
lowerCAmelCase_ : List[str] = 8
lowerCAmelCase_ : List[str] = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
lowerCAmelCase_ : Dict = 10_24
lowerCAmelCase_ : List[Any] = 40_96
lowerCAmelCase_ : Any = 24
lowerCAmelCase_ : List[str] = 16
elif vit_name[4:].startswith("""huge""" ):
lowerCAmelCase_ : Optional[int] = 12_80
lowerCAmelCase_ : Dict = 51_20
lowerCAmelCase_ : Union[str, Any] = 32
lowerCAmelCase_ : Optional[int] = 16
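# Worked example (illustrative): for vit_name = "vit_base_patch16_224",
# vit_name[-6:-4] == "16" (patch size) and vit_name[-3:] == "224" (image
# size); vit_name[4:] starts with "base", so the default ViT-Base config
# dimensions are kept unchanged by the branch above.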
# load original model from timm
lowerCAmelCase_ : Union[str, Any] = timm.create_model(A__ , pretrained=A__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase_ : int = timm_model.state_dict()
if base_model:
remove_classification_head_(A__ )
lowerCAmelCase_ : str = create_rename_keys(A__ , A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , A__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase_ : int = ViTModel(A__ ).eval()
else:
lowerCAmelCase_ : Optional[int] = ViTForImageClassification(A__ ).eval()
model.load_state_dict(A__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase_ : Any = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase_ : Any = ViTImageProcessor(size=config.image_size )
lowerCAmelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : int = encoding["""pixel_values"""]
lowerCAmelCase_ : int = model(A__ )
if base_model:
lowerCAmelCase_ : Union[str, Any] = timm_model.forward_features(A__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A__ , outputs.pooler_output , atol=1E-3 )
else:
lowerCAmelCase_ : Union[str, Any] = timm_model(A__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A__ , outputs.logits , atol=1E-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__A : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 89
| 1
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _snake_case ( self )-> List[str]:
lowerCamelCase_ , lowerCamelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
lowerCamelCase_ ="""A painting of a squirrel eating a burger"""
lowerCamelCase_ =jax.device_count()
lowerCamelCase_ =num_samples * [prompt]
lowerCamelCase_ =sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =replicate(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =shard(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =jax.random.PRNGKey(0 )
lowerCamelCase_ =jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
lowerCamelCase_ =sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase_ =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase_ =images[0, 253:256, 253:256, -1]
lowerCamelCase_ =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase_ =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ ="""stabilityai/stable-diffusion-2"""
lowerCamelCase_ , lowerCamelCase_ =FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder="""scheduler""" )
lowerCamelCase_ , lowerCamelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , revision="""bf16""" , dtype=jnp.bfloataa , )
lowerCamelCase_ =scheduler_params
lowerCamelCase_ ="""A painting of a squirrel eating a burger"""
lowerCamelCase_ =jax.device_count()
lowerCamelCase_ =num_samples * [prompt]
lowerCamelCase_ =sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =replicate(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =shard(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =jax.random.PRNGKey(0 )
lowerCamelCase_ =jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
lowerCamelCase_ =sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase_ =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase_ =images[0, 253:256, 253:256, -1]
lowerCamelCase_ =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase_ =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 154
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[int] = ["image_processor", "tokenizer"]
_UpperCamelCase:Tuple = "ChineseCLIPImageProcessor"
_UpperCamelCase:List[str] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )-> Tuple:
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =kwargs.pop("""feature_extractor""" )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )-> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
lowerCamelCase_ =self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if images is not None:
lowerCamelCase_ =self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowerCamelCase_ =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> List[str]:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Any:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.tokenizer.model_input_names
lowerCamelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _snake_case ( self )-> int:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
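# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file; the dump obfuscates the
# class name). With the standard Transformers name, joint text/image
# processing looks like this; the checkpoint id is an assumption:
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#   # inputs now carries input_ids/attention_mask plus pixel_values.
# ---------------------------------------------------------------------------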
| 154
| 1
|
from __future__ import annotations
import os
from typing import Any
import requests
__UpperCAmelCase = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__UpperCAmelCase = BASE_URL + """/user"""
# https://github.com/settings/tokens
__UpperCAmelCase = os.environ.get("""USER_TOKEN""", """""")
def snake_case_ (__A : str ) -> dict[Any, Any]:
__lowerCAmelCase : List[str] = {
"""Authorization""": f'''token {auth_token}''',
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__A , headers=__A ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
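# Illustrative invocation (not part of the original file): with a personal
# access token exported as USER_TOKEN, running the module prints the
# authenticated user's profile fields, e.g.
#
#   USER_TOKEN=ghp_xxx python fetch_github_info.py
#
# The module filename is assumed; only the USER_TOKEN variable comes from the
# code above.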
| 139
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__UpperCAmelCase = logging.get_logger(__name__)
def snake_case_ () -> bool:
# Get the sagemaker specific mp parameters from smp_options variable.
__lowerCAmelCase : List[Any] = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__lowerCAmelCase : Any = json.loads(__A )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__lowerCAmelCase : Any = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__lowerCAmelCase : Union[str, Any] = json.loads(__A )
if not mpi_options.get("""sagemaker_mpi_enabled""" , __A ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
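# Illustrative environment (not from the original file): the check above
# succeeds when both variables are set along these lines, e.g.
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# "partitions" and "sagemaker_mpi_enabled" are the keys the code inspects;
# the values shown are made up.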
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : str =field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , lowerCAmelCase , )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
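    # Usage sketch (hypothetical argument values): constructing
    # `args = SageMakerTrainingArguments(output_dir="out")` and then reading `args.device`
    # resolves the cached property above exactly once per process.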
@property
    def world_size(self):
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device(self):
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation(self):
"""simple docstring"""
return False
| 139
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
                 range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("Onnx compliancy broke with TF 2.10" )
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Prepare a batch of 2 sequences for the slow LayoutLM integration tests below."""
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
@slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1E-3))
@slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]), )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
@slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
@slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
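# To run just the slow integration checks above (a sketch; the file path is assumed from the
# usual upstream test layout, not stated in this snippet):
#   RUN_SLOW=1 pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py -k "IntegrationTest"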
| 158
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True,
                 rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(self, image, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None,
                   do_center_crop=None, crop_size=None, do_flip_channel_order=None, return_tensors=None,
                   data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(target_sizes)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
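# Minimal usage sketch (assumes a PIL image `img`; the class name is as reconstructed above):
#   processor = MobileViTImageProcessor()
#   batch = processor(images=img, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 256, 256) with the defaults above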
| 158
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 24
|
"""simple docstring"""
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet with a + b + c == 1_000 (Project Euler 9)."""
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
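# Sanity check (well-known result for this classic problem): the qualifying triplet is
# (a, b, c) = (200, 375, 425), so solution() should return 200 * 375 * 425 == 31_875_000.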
| 24
| 1
|
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    """Project Euler 57: count how many of the first n sqrt(2) expansions have a numerator with more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
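# Quick check (well-known values for this puzzle): the first expansion whose numerator gains a
# digit is the eighth, 1393/985, so solution(8) == 1; the full answer is solution(1000) == 153.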
| 185
|
'''simple docstring'''
def count_inversions_bf(arr) -> int:
    """Brute-force O(n^2) inversion count."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Divide-and-conquer O(n log n) inversion count; returns (sorted_arr, num_inversions)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting the inversions that cross the split point."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
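# Additional spot check (a small worked example): count_inversions_bf([3, 1, 2]) == 2,
# matching count_inversions_recursive([3, 1, 2])[1] — the inverted pairs are (3, 1) and (3, 2).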
| 185
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=5_1865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
                 decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
                 encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=5_0257, use_cache=True,
                 is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
                 max_source_positions=1500, max_target_positions=448, pad_token_id=5_0256, bos_token_id=5_0256,
                 eos_token_id=5_0256, suppress_tokens=None, begin_suppress_tokens=[220, 5_0256],
                 use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
                 mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
                 mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
def lowercase_ ( self : Union[str, Any], a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional["TensorType"] = None, a_ : int = 2_2050, a_ : float = 5.0, a_ : int = 220, ):
"""simple docstring"""
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=a_, framework=a_, sampling_rate=a_, time_duration=a_, frequency=a_, )
UpperCamelCase__ = encoder_inputs["input_features"].shape[2]
UpperCamelCase__ = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCamelCase__ = super().generate_dummy_inputs(
preprocessor.tokenizer, a_, a_, a_, a_ )
UpperCamelCase__ = encoder_inputs.pop("input_features" )
UpperCamelCase__ = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
UpperCamelCase__ = decoder_inputs.pop("past_key_values" )
return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
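# Usage sketch (assumes the standard transformers ONNX export flow; class names as
# reconstructed above):
#   config = WhisperConfig()
#   onnx_config = WhisperOnnxConfig(config)
#   list(onnx_config.inputs)  # ['input_features', 'decoder_input_ids']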
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_loading_old_format_raises(self):
        # Loading a tokenizer saved in the pre-v4.17.0 format should fail with a clear error.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 31
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_0_0_0
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_84,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
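    # Example invocation (hypothetical local paths, not taken from this script):
    #   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
    #       --cvt_file_name CvT-13-224x224-IN-1k.pth --pytorch_dump_folder_path ./cvt-13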
| 34
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
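    # Note: `_LazyModule` defers the heavy framework imports above, so e.g. importing only
    # `VisionEncoderDecoderConfig` never triggers the torch, TF, or Flax import paths.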
| 331
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images with random uint8 pixel values."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 0
| 1
|
'''simple docstring'''
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak the S3PRL downstream weights into the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
_UpperCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_UpperCAmelCase : Any = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
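
    # A minimal sketch of calling the converter programmatically; the model
    # name and every path below are placeholders, not artifacts shipped with
    # this script.
    # convert_s3prl_checkpoint(
    #     base_model_name="microsoft/unispeech-sat-base",
    #     config_path="./classifier_config.json",
    #     checkpoint_path="./s3prl_checkpoint.ckpt",
    #     model_dump_path="./converted_model",
    # )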
| 174
|
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
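
    # Added round-trip check against the standard library (illustrative, not
    # part of the original module):
    import base64

    sample = b"Hello, World!"
    encoded = base64_encode(sample)
    assert encoded == base64.b64encode(sample)  # matches the stdlib encoder
    assert base64_decode(encoded) == sample  # decoding recovers the input
    print(encoded)  # b'SGVsbG8sIFdvcmxkIQ=='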
| 45
| 0
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    """Multiply the denominators and numerators of the digit-cancelling
    fractions and return the reduced denominator."""
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
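
    # Added sanity check: the four non-trivial digit-cancelling fractions,
    # whose denominator/numerator product reduces to 100.
    print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']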
| 363
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model to quickly (no-@slow) catch simple problems and do an
    # extensive testing of functionality with multiple models as @slow separately
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 152
| 0
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        # Pick, per sample, the decoding (char / bpe / wp) with the highest
        # confidence score.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 6
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
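
    # Added note: `has_loop` above keeps a `visited` list, which costs O(n)
    # extra memory and O(n^2) time for the membership checks. Floyd's
    # two-pointer cycle detection, sketched here, needs O(n) time and O(1)
    # memory.
    def has_loop_floyd(head: Node) -> bool:
        slow = fast = head
        while fast is not None and fast.next_node is not None:
            slow = slow.next_node
            fast = fast.next_node.next_node
            if slow is fast:  # the pointers can only meet inside a cycle
                return True
        return False

    print(has_loop_floyd(root_node))  # False, matches has_loop above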
| 343
| 0
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 358
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 58
| 0
|
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
__snake_case :Tuple = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case :Dict = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
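
    # Added examples: counting sort runs in O(n + k) time, where k is the
    # span between the smallest and largest values, so it only suits inputs
    # with a modest value range.
    print(counting_sort([0, 5, 3, 2, 2]))  # [0, 2, 2, 3, 5]
    print(counting_sort([-2, -5, -45]))  # [-45, -5, -2]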
| 49
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Calculate the numeric solution of an ODE at each step using Euler's
    forward method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
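
    # Added worked example: integrate y' = y with y(0) = 1 from x = 0 to 1.
    # The exact answer is e ~ 2.71828; explicit Euler's global error shrinks
    # linearly with the step size.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7048 with step size 0.01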
| 309
| 0
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
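
    # Example invocation; the script name and both paths are placeholders for
    # locally available files:
    #
    #   python convert_transfo_xl_checkpoint.py \
    #       --pytorch_dump_folder_path ./transfo_xl_pytorch \
    #       --transfo_xl_dataset_file ./corpus.pkl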
| 165
|
import argparse
_SCREAMING_SNAKE_CASE = """docs/source/_static/js/custom.js"""
def lowercase( UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
with open(UpperCamelCase_ , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase = f.readlines()
UpperCamelCase = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
UpperCamelCase = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
update_custom_js(args.version)
| 165
| 1
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 40
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=False, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
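
    # A hypothetical command line for this script (every value below is
    # illustrative only):
    #
    #   python run_mae.py \
    #       --dataset_name cifar10 \
    #       --output_dir ./vit-mae-demo \
    #       --do_train --do_eval \
    #       --base_learning_rate 1.5e-4 \
    #       --mask_ratio 0.75 \
    #       --norm_pix_loss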
| 315
| 0
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional, handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it (or passing -1) picks a
    # random count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional, handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it (or passing -1) picks a
    # random count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
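

if __name__ == "__main__":
    # Added smoke test of the classes above (illustrative only, using the
    # de-obfuscated names from this rewrite).
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)
    print(dg.all_nodes())  # [0, 1, 2]
    print(dg.bfs(0))  # [0, 1, 2]
    print(dg.has_cycle())  # True: 0 -> 1 -> 2 -> 0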
| 341
|
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
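
    # Added illustration (not part of the original module): De Morgan's law
    # builds an AND gate from OR and NOT, i.e. a AND b == NOT(NOT a OR NOT b).
    def and_gate(input_1: int, input_2: int) -> int:
        return int(not or_gate(int(not input_1), int(not input_2)))

    print(and_gate(1, 1))  # 1
    print(and_gate(1, 0))  # 0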
| 341
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
def __call__(self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__snake_case : List[Any] = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
__snake_case : Any = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__snake_case : List[Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__snake_case : Optional[Any] = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__snake_case : Tuple = images
return encoded_inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(_a )} and {len(_a )}""" )
return images_with_overflow
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*_a , **_a )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
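# Hedged usage sketch (checkpoint and public class names assumed, not taken from this file):
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR fills in words and boxes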
| 102
|
'''simple docstring'''
def neville_interpolate(x_points : list , y_points : list , xa : int ):
    """simple docstring"""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
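    # Hedged usage sketch: interpolating f(x) = x**2 at x = 5 from exact sample
    # points; the first returned element is the interpolated value.
    print(neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)[0])  # 25.0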
| 271
| 0
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='''relu'''))
    classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
    # Compiling the CNN
    classifier.compile(
        optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    # `fit` accepts generators directly; `fit_generator` was removed in newer TF releases.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('''cnn.h5''')
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = '''Normal'''
    if result[0][0] == 1:
        prediction = '''Abnormality detected'''
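    # Hedged usage note: `result` holds the raw sigmoid score; the label above is
    # only assigned when the score is exactly 0 or 1, mirroring the original check.
    print(result[0][0])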
| 130
|
def a_ ( __lowercase : int = 50_000_000 ) -> int:
_snake_case = set()
_snake_case = int((limit - 24) ** (1 / 2) )
_snake_case = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , __lowercase ) ) )
for primea in primes:
_snake_case = primea * primea
for primea in primes:
_snake_case = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
_snake_case = primea * primea * primea * primea
_snake_case = square + cube + tetr
if total >= limit:
break
ret.add(__lowercase )
return len(__lowercase )
if __name__ == "__main__":
print(F'{solution() = }')
| 130
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__(self : Union[str, Any] , a__ : List[str] , a__ : Dict=7 , a__ : List[Any]=3 , a__ : Any=18 , a__ : Union[str, Any]=30 , a__ : List[Any]=400 , a__ : Dict=True , a__ : int=None , a__ : int=True , a__ : List[str]=None , a__ : Optional[int]=True , ):
"""simple docstring"""
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def a (self : Any ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
A_ : List[Any] = MobileViTImageProcessor if is_vision_available() else None
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = MobileViTImageProcessingTester(self )
@property
def a (self : Tuple ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a (self : str ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , '''do_resize''' ) )
self.assertTrue(hasattr(a__ , '''size''' ) )
self.assertTrue(hasattr(a__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a__ , '''center_crop''' ) )
self.assertTrue(hasattr(a__ , '''do_flip_channel_order''' ) )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def a (self : Dict ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a (self : List[str] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a (self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = False
A_ : List[Any] = False
def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
| 24
| 1
|
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    # compile regexes and force a complete match on each window of keys
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
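# Hedged demo of the helpers above: _match succeeds when the regex tuple matches
# a contiguous window of a flattened parameter key.
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("mlp", "c_fc", "kernel"), ("ln_f", "scale"))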
| 88
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
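# Hedged sanity checks: the kernel peaks at its center, and filtering an
# (h, w) image with a k x k mask yields an (h - k + 1, w - k + 1) result.
assert gen_gaussian_kernel(3, 1)[1, 1] == gen_gaussian_kernel(3, 1).max()
assert gaussian_filter(zeros((10, 10)), 3, sigma=1).shape == (8, 8)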
if __name__ == "__main__":
    # read original image
    img = imread(R"""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("""gaussian filter with 3x3 mask""", gaussian3x3)
    imshow("""gaussian filter with 5x5 mask""", gaussian5x5)
    waitKey()
| 88
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
def odd_even_transposition(arr : list ) -> list:
    """simple docstring"""
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
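    # Hedged extra check: a reverse-sorted list of ten elements comes back sorted.
    assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))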
| 31
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : Any = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig):
lowerCAmelCase_ = """xlm"""
lowerCAmelCase_ = {
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
def __init__( self , A_=30145 , A_=2048 , A_=12 , A_=16 , A_=0.1 , A_=0.1 , A_=True , A_=False , A_=False , A_=False , A_=1 , A_=True , A_=512 , A_=2048**-0.5 , A_=1e-12 , A_=0.02 , A_=0 , A_=1 , A_=2 , A_=3 , A_=5 , A_=True , A_="first" , A_=True , A_=None , A_=True , A_=0.1 , A_=5 , A_=5 , A_=0 , A_=0 , A_=2 , A_=0 , **A_ , )-> str:
'''simple docstring'''
UpperCamelCase = vocab_size
UpperCamelCase = emb_dim
UpperCamelCase = n_layers
UpperCamelCase = n_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = gelu_activation
UpperCamelCase = sinusoidal_embeddings
UpperCamelCase = causal
UpperCamelCase = asm
UpperCamelCase = n_langs
UpperCamelCase = use_lang_emb
UpperCamelCase = layer_norm_eps
UpperCamelCase = bos_index
UpperCamelCase = eos_index
UpperCamelCase = pad_index
UpperCamelCase = unk_index
UpperCamelCase = mask_index
UpperCamelCase = is_encoder
UpperCamelCase = max_position_embeddings
UpperCamelCase = embed_init_std
UpperCamelCase = init_std
UpperCamelCase = summary_type
UpperCamelCase = summary_use_proj
UpperCamelCase = summary_activation
UpperCamelCase = summary_proj_to_labels
UpperCamelCase = summary_first_dropout
UpperCamelCase = start_n_top
UpperCamelCase = end_n_top
UpperCamelCase = mask_token_id
UpperCamelCase = lang_id
if "n_words" in kwargs:
UpperCamelCase = kwargs['n_words']
super().__init__(pad_token_id=A_ , bos_token_id=A_ , **A_ )
class SCREAMING_SNAKE_CASE__ ( OnnxConfig):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 251
|
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
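# Hedged mini example: turnaround = burst + waiting, so bursts [3, 1] with
# waiting times [1, 0] give [4, 1].
assert calculate_turnaroundtime([3, 1], 2, [1, 0]) == [4, 1]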
if __name__ == "__main__":
    print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 251
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
# fmt: off
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
a = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
a = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : List[Any] ) ->int:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , **__UpperCAmelCase : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        a = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_image_processor()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 100
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _UpperCAmelCase( PretrainedConfig ):
lowercase__ = 'xlm-roberta-xl'
def __init__( self , __a=25_08_80 , __a=25_60 , __a=36 , __a=32 , __a=1_02_40 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_14 , __a=1 , __a=0.02 , __a=1e-05 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , __a=None , **__a , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = classifier_dropout
class _UpperCAmelCase( OnnxConfig ):
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 100
| 1
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _SCREAMING_SNAKE_CASE ( OnnxPipelineTesterMixin , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def _A ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _A ( self : Optional[Any] ):
UpperCamelCase :Optional[Any] = ort.SessionOptions()
UpperCamelCase :Any = False
return options
def _A ( self : str ):
UpperCamelCase :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase :int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase :int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
UpperCamelCase :Any = '''A red cat sitting on a park bench'''
UpperCamelCase :Optional[int] = np.random.RandomState(0 )
UpperCamelCase :str = pipe(
prompt=__lowercase , image=__lowercase , mask_image=__lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowercase , output_type="""np""" , )
UpperCamelCase :Tuple = output.images
UpperCamelCase :Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase :List[str] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A ( self : int ):
UpperCamelCase :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase :Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase :List[Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase :List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
UpperCamelCase :List[str] = '''A red cat sitting on a park bench'''
UpperCamelCase :Dict = np.random.RandomState(0 )
UpperCamelCase :Union[str, Any] = pipe(
prompt=__lowercase , image=__lowercase , mask_image=__lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowercase , output_type="""np""" , )
UpperCamelCase :Tuple = output.images
UpperCamelCase :Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase :str = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 38
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
snake_case_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __magic_name__ ( self : str , __lowercase : str , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AudioClassificationPipeline(model=__lowercase , feature_extractor=__lowercase )
# test with a raw waveform
SCREAMING_SNAKE_CASE__ : Optional[int] =np.zeros((3_40_00,) )
SCREAMING_SNAKE_CASE__ : str =np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =examples
SCREAMING_SNAKE_CASE__ : str =audio_classifier(__lowercase )
# by default a model is initialized with num_labels=2
self.assertEqual(
__lowercase , [
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =audio_classifier(__lowercase , top_k=1 )
self.assertEqual(
__lowercase , [
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
] , )
self.run_torchaudio(__lowercase )
@require_torchaudio
def __magic_name__ ( self : Union[str, Any] , __lowercase : str ) -> Optional[Any]:
import datasets
# test with a local file
SCREAMING_SNAKE_CASE__ : Optional[int] =datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
SCREAMING_SNAKE_CASE__ : int =dataset[0]['''audio''']['''array''']
SCREAMING_SNAKE_CASE__ : Optional[Any] =audio_classifier(__lowercase )
self.assertEqual(
__lowercase , [
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
] , )
@require_torch
def __magic_name__ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict ='''anton-l/wav2vec2-random-tiny-classifier'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipeline('''audio-classification''' , model=__lowercase )
SCREAMING_SNAKE_CASE__ : str =np.ones((80_00,) )
SCREAMING_SNAKE_CASE__ : List[str] =audio_classifier(__lowercase , top_k=4 )
SCREAMING_SNAKE_CASE__ : Dict =[
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
SCREAMING_SNAKE_CASE__ : List[str] =[
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
SCREAMING_SNAKE_CASE__ : List[str] ={'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE__ : Tuple =audio_classifier(__lowercase , top_k=4 )
self.assertIn(nested_simplify(__lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __magic_name__ ( self : Dict ) -> Any:
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''superb/wav2vec2-base-superb-ks'''
SCREAMING_SNAKE_CASE__ : Optional[int] =pipeline('''audio-classification''' , model=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : List[str] =np.array(dataset[3]['''speech'''] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : int =audio_classifier(__lowercase , top_k=4 )
self.assertEqual(
nested_simplify(__lowercase , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
pass
| 152
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/pegasus-xsum""": 5_1_2,
}
logger = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__="<pad>" ,SCREAMING_SNAKE_CASE__="</s>" ,SCREAMING_SNAKE_CASE__="<unk>" ,SCREAMING_SNAKE_CASE__="<mask_2>" ,SCREAMING_SNAKE_CASE__="<mask_1>" ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=1_03 ,SCREAMING_SNAKE_CASE__ = None ,**SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = offset
if additional_special_tokens is not None:
if not isinstance(__lowercase ,__lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(__lowercase )}, but is'''
f''' {type(__lowercase )}''' )
__SCREAMING_SNAKE_CASE :Tuple = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(__lowercase ) ,self.offset - 1 )
]
if len(set(__lowercase ) ) != len(__lowercase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = additional_special_tokens_extended
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 ,self.offset )]
__SCREAMING_SNAKE_CASE :Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
            eos_token=eos_token ,unk_token=unk_token ,mask_token=mask_token ,pad_token=pad_token ,mask_token_sent=mask_token_sent ,offset=offset ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 ,self.offset - 1 )} )
        self.decoder = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model ) + self.offset

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self) -> dict:
        state = self.__dict__.copy()
        # the SentencePiece processor cannot be pickled, so drop it from the state
        state["sp_model"] = None
        return state

    def __setstate__(self ,d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self ,text: str ) -> list:
        return self.sp_model.encode(text ,out_type=str )

    def _convert_token_to_id(self ,token: str ) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset

    def _convert_id_to_token(self ,index: int ) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token

    def convert_tokens_to_string(self ,tokens: list ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def num_special_tokens_to_add(self ,pair=False ) -> int:
        """Just EOS."""
        return 1

    def _special_token_mask(self ,seq: list ) -> list:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ) -> list:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]

    def build_inputs_with_special_tokens(self ,token_ids_0 ,token_ids_1=None ) -> list:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self ,save_directory: str ,filename_prefix = None ) -> tuple:
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
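# A minimal, self-contained sketch (not part of the tokenizer's API; every name
# here is illustrative) of the offset scheme used above: the first `offset` ids
# are reserved for special tokens, and every SentencePiece id is shifted up by
# `offset` when converting between tokens and ids.
toy_offset = 103
toy_encoder = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}
toy_sp_vocab = {"hello": 0, "world": 1}  # stand-in for the SentencePiece model

def toy_token_to_id(token: str) -> int:
    reserved = {v: k for k, v in toy_encoder.items()}
    if token in reserved:
        return reserved[token]  # special tokens resolve through the reserved table
    return toy_sp_vocab[token] + toy_offset  # everything else is shifted by the offset

assert toy_token_to_id("<pad>") == 0
assert toy_token_to_id("hello") == toy_offset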
| 359
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    test_failures[i][0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
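# Quick illustration (assuming the `tabulate` package is installed) of why the
# custom TableFormat above exists: it renders plain `| a | b |` rows with no
# rule lines, which displays cleanly inside a Slack ``` code block.
from tabulate import DataRow, TableFormat, tabulate

demo_format = TableFormat(
    lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None,
    headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"),
    padding=1, with_header_hide=None,
)
print(tabulate([["test_cli.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=demo_format, stralign="right"))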
| 239
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever of voltage, current and power is given as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
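# Usage illustration: exactly one argument is 0 and gets solved for via P = V * I.
print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)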
| 209
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__(self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self , rng ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__(self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
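# A minimal standalone sketch (hypothetical shapes, illustrative names) of the
# zero-initialized 1x1 convolution trick the model above relies on: at
# initialization every controlnet block outputs zeros, so the control branch
# contributes nothing and training starts from the frozen base model's behavior.
import jax
import jax.numpy as jnp
import flax.linen as nn

class ZeroConv(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        return nn.Conv(self.features, kernel_size=(1, 1), padding="VALID",
                       kernel_init=nn.initializers.zeros_init(),
                       bias_init=nn.initializers.zeros_init())(x)

x = jnp.ones((1, 8, 8, 4))
params = ZeroConv(features=4).init(jax.random.PRNGKey(0), x)
assert jnp.allclose(ZeroConv(features=4).apply(params, x), 0.0)  # zero output at init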
| 209
| 1
|
from __future__ import annotations
def carrier_concentration( electron_conc: float , hole_conc: float , intrinsic_conc: float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
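# Usage illustration: by the mass-action law n * p = n_i**2, any one of the
# three concentrations follows from the other two.
print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
# -> ('intrinsic_conc', 50.0)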
| 364
|
from __future__ import annotations
from math import pi
def ind_reactance( inductance: float , frequency: float , reactance: float ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
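# Usage illustration: X_L = 2 * pi * f * L, so any one quantity can be recovered
# from the other two by passing it as 0.
print(ind_reactance(inductance=0, frequency=1_000, reactance=50))
# -> {'inductance': 0.007957747154594767}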
| 258
| 0
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        # insert at the head of the list
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the payloads; the links themselves are left untouched
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
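# Because swap_nodes exchanges node *data* rather than relinking nodes, it runs
# in O(n) time with O(1) extra space; a quick check of that behavior:
demo = LinkedList()
for value in (3, 2, 1):
    demo.push(value)      # list is now 1 2 3
demo.swap_nodes(1, 3)
demo.print_list()         # prints: 3 2 1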
| 81
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ['''input_features''', '''attention_mask''']

    def __init__( self , feature_size=80 , sampling_rate=16_000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features( self , one_waveform: np.array ):
        """Extract mel-frequency spectral coefficient features for one waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="log" , )
        return msfc_features.T

    def _normalize_one( self , x , input_length , padding_value ):
        # subtract the per-feature mean and divide by the per-feature std over the valid prefix
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x

    def normalize( self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ):
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]

    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Union[bool, str, PaddingStrategy] = False , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
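# A standalone sketch of the cepstral mean/variance normalization performed by
# _normalize_one above, assuming a (time, feature) float array and a valid
# prefix length (an epsilon is added here for numerical safety, which the
# extractor itself does not do):
import numpy as np

def cmvn(x: np.ndarray, input_length: int) -> np.ndarray:
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    return ((x - mean) / (std + 1e-10)).astype(np.float32)

feats = np.random.rand(100, 80).astype(np.float32)
assert abs(cmvn(feats, input_length=100).mean()) < 1e-3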
| 300
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 6_4, 6_4) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_base(self):
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
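# The tests above hinge on deterministic seeding; a minimal sketch of the
# device-aware generator pattern used in get_dummy_inputs (names illustrative):
import torch

def make_generator(device: str, seed: int = 0):
    # MPS historically lacked per-device generators, so tests fall back to the
    # global CPU seed there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

generator = make_generator("cpu", seed=0)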
| 254
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1_0 , hop_length=1_6_0 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_0_0_0 , return_attention_mask=False , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='max_length' , return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )

    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )

    def _load_datasamples(self , num_samples ):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
                0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
                0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
                -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
        self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
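# Why the integration test expects shape (1, 80, 3000): Whisper computes 80
# log-mel bins over a 30 s window of 16 kHz audio with a 160-sample hop, i.e.
# 30 * 16_000 / 160 = 3000 frames (standard Whisper defaults). Quick check:
sampling_rate = 16_000
chunk_length_s = 30
hop_length = 160
assert chunk_length_s * sampling_rate // hop_length == 3_000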
| 254
| 1
|
"""simple docstring"""
def sum_of_digits(n: int ) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int ) -> int:
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )


def sum_of_digits_compact(n: int ) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )


def benchmark() -> None:
    """Benchmark the three digit-sum implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='import __main__' )
        print(f"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )

    for value in (26_2144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
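# Quick sanity check that all three implementations agree, including on
# negative input (the digit sum is taken on the absolute value):
for n in (0, 7, -123, 26_2144):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)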
| 77
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None , metadata={"help": "The input training data file (a text file)."})
    train_data_files: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    line_by_line: bool = field(
        default=False , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    mlm: bool = field(
        default=False , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
    whole_word_mask: bool = field(default=False , metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
    plm_probability: float = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
    block_size: int = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"})


def get_dataset( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key in sorted(result.keys() ):
                    logger.info(' %s = %s' , key , str(result[key] ) )
                    writer.write('%s = %s\n' % (key, str(result[key] )) )
        results.update(result )
    return results


def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
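# A minimal sketch of what the MLM collator selected above produces when
# mlm=True: tokens are masked with probability mlm_probability and the labels
# keep the original ids. Illustrative only; assumes transformers is installed
# and the named checkpoint is available locally or downloadable.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tok = AutoTokenizer.from_pretrained("distilbert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=True, mlm_probability=0.15)
batch = collator([tok("hello world")["input_ids"]])
print(batch["input_ids"].shape, batch["labels"].shape)  # same shape; labels are -100 at unmasked positions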
| 77
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""image_processor""", """tokenizer"""]
__UpperCamelCase = """LayoutLMv3ImageProcessor"""
__UpperCamelCase = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
def __init__( self :Union[str, Any] , lowercase_ :Optional[Any]=None , lowercase_ :List[str]=None , **lowercase_ :str ) -> Union[str, Any]:
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
UpperCAmelCase = kwargs.pop('feature_extractor' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase_ , lowercase_ )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
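# Hedged usage sketch for the processor above; the checkpoint name and image
# path are illustrative assumptions, not part of this module.
def _processor_usage_sketch():
    from PIL import Image
    from transformers import LayoutLMv3Processor
    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")  # assumed checkpoint
    image = Image.open("document.png").convert("RGB")  # hypothetical input file
    # With the default apply_ocr=True, words and boxes come from OCR, so only
    # the image is required; the output typically holds input_ids,
    # attention_mask, bbox and pixel_values.
    return processor(image, return_tensors="pt")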
| 181
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps , embedding_dim , freq_shift = 1 , min_timescale = 1 , max_timescale = 1.0e4 , flip_sin_to_cos = False , scale = 1.0 , ):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class FlaxTimestepEmbedding( nn.Module ):
    """simple docstring"""
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__( self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(temb )
        return temb
class FlaxTimesteps( nn.Module ):
    """simple docstring"""
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__( self , timesteps ):
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
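# Small hedged self-check sketch for get_sinusoidal_embeddings (shapes only):
def _embedding_sketch():
    timesteps = jnp.array([0, 1, 10, 100])          # a 1-d batch of timesteps
    emb = get_sinusoidal_embeddings(timesteps, embedding_dim=8)
    assert emb.shape == (4, 8)                      # sin half then cos half
    return emb                                      # row 0 (t=0): sin terms 0, cos terms 1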
| 181
| 1
|
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations( n: int, k: int ):
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result )
    return result
def create_all_state( increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ):
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment, total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1, total_number, level - 1, current_list, total_list )
        current_list.pop()
def print_all_state( total_list: list[list[int]] ):
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
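    # Expected output for n=4, k=2 — the six 2-element combinations of {1,...,4}:
    # 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4
    assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]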
| 145
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float ) -> float:
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
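    # Worked example of the recursion above:
    #   gamma(2.5) = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ~= 1.3293
    #   gamma(5)   = 4 * 3 * 2 * 1 * gamma(1) = 24.0  (i.e. (n-1)! for integers)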
| 301
| 0
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("""T""")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 369
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x )-> List[Any]:
    """simple docstring"""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class DeeBertEncoder( nn.Module ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )
    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings
    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value
    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    '''simple docstring'''
    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway( nn.Module ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self , encoder_outputs ):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. " , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
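# Hedged wiring sketch for the classes above; config values are illustrative
# assumptions, not taken from this file.
def _deebert_usage_sketch():
    from transformers import BertConfig
    config = BertConfig(num_labels=2)                 # assumed binary task
    model = DeeBertForSequenceClassification(config)
    # A uniform entropy threshold: at inference, any highway whose prediction
    # entropy drops below 0.3 triggers an early exit via HighwayException.
    model.bert.encoder.set_early_exit_entropy(0.3)
    model.bert.init_highway_pooler()  # copy final-pooler weights into each highway
    return model.eval()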
| 267
| 0
|
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    """transformers""",
    os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
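# Small self-check of the checkpoint regex defined above:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]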
| 265
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest( unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision: int ) -> str:
    """simple docstring"""
    if not isinstance(precision, int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1, num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
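    # Each Chudnovsky term contributes roughly 14 more correct digits, hence
    # the ceil(precision / 14) iterations above. Quick sanity check:
    assert pi(10).startswith("3.14159265")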
| 138
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_pct''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 45
|
import os
def solution():
    with open(os.path.dirname(__file__ ) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace("\"" , "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
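    # Worked example from the problem statement: COLIN has alphabetical value
    # 3 + 15 + 12 + 9 + 14 = 53, and at sorted position 938 scores 938 * 53 = 49714.
    assert sum(ord(c) - 64 for c in "COLIN") == 53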
| 76
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : Optional[int] = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator , model , eval_dataloader , metric ):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("""glue""" , """mrpc""" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("""epoch_""" )[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("""resumed checkpoint performance:""" , accuracy )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["""accuracy"""] = accuracy
        state["""lr"""] = lr_scheduler.get_lr()[0]
        state["""optimizer_lr"""] = optimizer.param_groups[0]["""lr"""]
        state["""epoch"""] = epoch
        state["""overall_step"""] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
                json.dump(state , f )
def main() -> None:
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--resume_from_checkpoint""" , type=str , default=None , help="""If the training should continue from a checkpoint folder.""" , )
    parser.add_argument(
        """--partial_train_epoch""" , type=int , default=None , help="""If passed, the training will stop after this number of epochs.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 141
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141
| 1
|
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # initializer protos compare equal once names are cleared
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( fp ):
    model_file_folder = os.path.dirname(fp )
    model_file_name = os.path.basename(fp )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
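# Hedged usage sketch; the path is a placeholder, not shipped with this script.
def _dedup_sketch():
    optimized_path = remove_dup_initializers("model.onnx")  # hypothetical file
    return optimized_path  # "optimized_model.onnx" written next to the input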
| 13
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 175
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """lilt"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
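# Minimal hedged instantiation sketch for the config above (values are the
# defaults unless overridden):
def _lilt_config_sketch():
    config = LiltConfig(channel_shrink_ratio=2)
    assert config.model_type == "lilt"
    assert config.max_2d_position_embeddings == 1024
    return config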
| 161
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 84
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Round-trip a model through BetterTransformer and back, checking outputs survive a save/reload."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must raise while in BetterTransformer mode and work again once reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
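# A minimal, hedged sketch of the optimum/BetterTransformer round trip the tests
# above cover (requires the `optimum` package; the checkpoint is the same tiny
# test model, and the save path is illustrative):
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()       # swap in fused-attention modules
# ... run generation / inference here ...
model = model.reverse_bettertransformer()  # restore canonical modules
model.save_pretrained("./saved-model")     # saving requires the reversed model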
| 327
| 0
|
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal):
    """Convert an integer (or integral float) to its "0x"-prefixed hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
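# A quick, hedged sanity check for the converter above, comparing against
# Python's built-in hex(), which uses the same lowercase "0x" format:
assert decimal_to_hexadecimal(255) == hex(255) == "0xff"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"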
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for FocalNet models, including the backbone output-feature API."""

    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
                 hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
                 focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
                 drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
                 use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
                 layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
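# A short, hedged usage sketch for the backbone-config API above; `out_features`
# must name entries of `stage_names`, and the mixin validates/aligns them:
config = FocalNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']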
| 162
| 0
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 82
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan of array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; returns an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
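# A standalone, hedged sanity check for the search functions above; the inputs
# are deliberately shorter than `precision`, so both searches fall through to
# lin_search and the expected indices are easy to verify by eye:
data = [1, 3, 5, 7, 9]
assert ite_ternary_search(data, 7) == 3
assert rec_ternary_search(0, len(data) - 1, data, 7) == 3
assert ite_ternary_search(data, 4) == -1  # absent values return -1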
| 258
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory.")


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions."""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ] )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
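# Hedged usage note: this is the standard transformers image-classification
# example, normally launched from the shell; the dataset name and flags below
# are illustrative, not prescriptive:
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./outputs \
#       --remove_unused_columns False \
#       --do_train --do_eval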
| 185
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
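# Hedged note on the pattern above: _LazyModule installs itself in sys.modules in
# place of this package, so heavyweight (torch-backed) submodules are imported
# only on first attribute access, e.g.:
#   from transformers.models.x_clip import XCLIPConfig  # cheap, config only
#   from transformers.models.x_clip import XCLIPModel   # triggers the torch import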
| 185
| 1
|